Total coverage: 251906 of 1547846 (17%)
// SPDX-License-Identifier: GPL-2.0-only /* * lib/bitmap.c * Helper functions for bitmap.h. */ #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/ctype.h> #include <linux/device.h> #include <linux/export.h> #include <linux/slab.h> /** * DOC: bitmap introduction * * bitmaps provide an array of bits, implemented using an * array of unsigned longs. The number of valid bits in a * given bitmap does _not_ need to be an exact multiple of * BITS_PER_LONG. * * The possible unused bits in the last, partially used word * of a bitmap are 'don't care'. The implementation makes * no particular effort to keep them zero. It ensures that * their value will not affect the results of any operation. * The bitmap operations that return Boolean (bitmap_empty, * for example) or scalar (bitmap_weight, for example) results * carefully filter out these unused bits from impacting their * results. * * The byte ordering of bitmaps is more natural on little * endian architectures. See the big-endian headers * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h * for the best explanations of this ordering. */ bool __bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] != bitmap2[k]) return false; if (bits % BITS_PER_LONG) if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) return false; return true; } EXPORT_SYMBOL(__bitmap_equal); bool __bitmap_or_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, const unsigned long *bitmap3, unsigned int bits) { unsigned int k, lim = bits / BITS_PER_LONG; unsigned long tmp; for (k = 0; k < lim; ++k) { if ((bitmap1[k] | bitmap2[k]) != bitmap3[k]) return false; } if (!(bits % BITS_PER_LONG)) return true; tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k]; return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0; } void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits) { unsigned int k, lim = BITS_TO_LONGS(bits); for (k = 0; k < lim; ++k) dst[k] = ~src[k]; } EXPORT_SYMBOL(__bitmap_complement); /** * __bitmap_shift_right - logical right shift of the bits in a bitmap * @dst : destination bitmap * @src : source bitmap * @shift : shift by this many bits * @nbits : bitmap size, in bits * * Shifting right (dividing) means moving bits in the MS -> LS bit * direction. Zeros are fed into the vacated MS positions and the * LS bits shifted off the bottom are lost. */ void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, unsigned shift, unsigned nbits) { unsigned k, lim = BITS_TO_LONGS(nbits); unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; unsigned long mask = BITMAP_LAST_WORD_MASK(nbits); for (k = 0; off + k < lim; ++k) { unsigned long upper, lower; /* * If shift is not word aligned, take lower rem bits of * word above and make them the top rem bits of result.
*/ if (!rem || off + k + 1 >= lim) upper = 0; else { upper = src[off + k + 1]; if (off + k + 1 == lim - 1) upper &= mask; upper <<= (BITS_PER_LONG - rem); } lower = src[off + k]; if (off + k == lim - 1) lower &= mask; lower >>= rem; dst[k] = lower | upper; } if (off) memset(&dst[lim - off], 0, off*sizeof(unsigned long)); } EXPORT_SYMBOL(__bitmap_shift_right); /** * __bitmap_shift_left - logical left shift of the bits in a bitmap * @dst : destination bitmap * @src : source bitmap * @shift : shift by this many bits * @nbits : bitmap size, in bits * * Shifting left (multiplying) means moving bits in the LS -> MS * direction. Zeros are fed into the vacated LS bit positions * and those MS bits shifted off the top are lost. */ void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, unsigned int shift, unsigned int nbits) { int k; unsigned int lim = BITS_TO_LONGS(nbits); unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; for (k = lim - off - 1; k >= 0; --k) { unsigned long upper, lower; /* * If shift is not word aligned, take upper rem bits of * word below and make them the bottom rem bits of result. */ if (rem && k > 0) lower = src[k - 1] >> (BITS_PER_LONG - rem); else lower = 0; upper = src[k] << rem; dst[k + off] = lower | upper; } if (off) memset(dst, 0, off*sizeof(unsigned long)); } EXPORT_SYMBOL(__bitmap_shift_left); /** * bitmap_cut() - remove bit region from bitmap and right shift remaining bits * @dst: destination bitmap, might overlap with src * @src: source bitmap * @first: start bit of region to be removed * @cut: number of bits to remove * @nbits: bitmap size, in bits * * Set the n-th bit of @dst iff the n-th bit of @src is set and * n is less than @first, or the m-th bit of @src is set for any * m such that @first <= n < nbits, and m = n + @cut. * * In pictures, example for a big-endian 32-bit architecture: * * The @src bitmap is:: * * 31 63 * | | * 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101 * | | | | * 16 14 0 32 * * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is:: * * 31 63 * | | * 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010 * | | | * 14 (bit 17 0 32 * from @src) * * Note that @dst and @src might overlap partially or entirely. * * This is implemented in the obvious way, with a shift and carry * step for each moved bit. Optimisation is left as an exercise * for the compiler. 
*/ void bitmap_cut(unsigned long *dst, const unsigned long *src, unsigned int first, unsigned int cut, unsigned int nbits) { unsigned int len = BITS_TO_LONGS(nbits); unsigned long keep = 0, carry; int i; if (first % BITS_PER_LONG) { keep = src[first / BITS_PER_LONG] & (~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG)); } memmove(dst, src, len * sizeof(*dst)); while (cut--) { for (i = first / BITS_PER_LONG; i < len; i++) { if (i < len - 1) carry = dst[i + 1] & 1UL; else carry = 0; dst[i] = (dst[i] >> 1) | (carry << (BITS_PER_LONG - 1)); } } dst[first / BITS_PER_LONG] &= ~0UL << (first % BITS_PER_LONG); dst[first / BITS_PER_LONG] |= keep; } EXPORT_SYMBOL(bitmap_cut); bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { unsigned int k; unsigned int lim = bits/BITS_PER_LONG; unsigned long result = 0; for (k = 0; k < lim; k++) result |= (dst[k] = bitmap1[k] & bitmap2[k]); if (bits % BITS_PER_LONG) result |= (dst[k] = bitmap1[k] & bitmap2[k] & BITMAP_LAST_WORD_MASK(bits)); return result != 0; } EXPORT_SYMBOL(__bitmap_and); void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { unsigned int k; unsigned int nr = BITS_TO_LONGS(bits); for (k = 0; k < nr; k++) dst[k] = bitmap1[k] | bitmap2[k]; } EXPORT_SYMBOL(__bitmap_or); void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { unsigned int k; unsigned int nr = BITS_TO_LONGS(bits); for (k = 0; k < nr; k++) dst[k] = bitmap1[k] ^ bitmap2[k]; } EXPORT_SYMBOL(__bitmap_xor); bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { unsigned int k; unsigned int lim = bits/BITS_PER_LONG; unsigned long result = 0; for (k = 0; k < lim; k++) result |= (dst[k] = bitmap1[k] & ~bitmap2[k]); if (bits % BITS_PER_LONG) result |= (dst[k] = bitmap1[k] & ~bitmap2[k] & BITMAP_LAST_WORD_MASK(bits)); return result != 0; } EXPORT_SYMBOL(__bitmap_andnot); void __bitmap_replace(unsigned long *dst, const unsigned long *old, const unsigned long *new, const unsigned long *mask, unsigned int nbits) { unsigned int k; unsigned int nr = BITS_TO_LONGS(nbits); for (k = 0; k < nr; k++) dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]); } EXPORT_SYMBOL(__bitmap_replace); bool __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] & bitmap2[k]) return true; if (bits % BITS_PER_LONG) if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) return true; return false; } EXPORT_SYMBOL(__bitmap_intersects); bool __bitmap_subset(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] & ~bitmap2[k]) return false; if (bits % BITS_PER_LONG) if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) return false; return true; } EXPORT_SYMBOL(__bitmap_subset); #define BITMAP_WEIGHT(FETCH, bits) \ ({ \ unsigned int __bits = (bits), idx, w = 0; \ \ for (idx = 0; idx < __bits / BITS_PER_LONG; idx++) \ w += hweight_long(FETCH); \ \ if (__bits % BITS_PER_LONG) \ w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \ \ w; \ }) unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits) { return BITMAP_WEIGHT(bitmap[idx], bits); } EXPORT_SYMBOL(__bitmap_weight); unsigned int 
__bitmap_weight_and(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits); } EXPORT_SYMBOL(__bitmap_weight_and); unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits); } EXPORT_SYMBOL(__bitmap_weight_andnot); void __bitmap_set(unsigned long *map, unsigned int start, int len) { unsigned long *p = map + BIT_WORD(start); const unsigned int size = start + len; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); while (len - bits_to_set >= 0) { *p |= mask_to_set; len -= bits_to_set; bits_to_set = BITS_PER_LONG; mask_to_set = ~0UL; p++; } if (len) { mask_to_set &= BITMAP_LAST_WORD_MASK(size); *p |= mask_to_set; } } EXPORT_SYMBOL(__bitmap_set); void __bitmap_clear(unsigned long *map, unsigned int start, int len) { unsigned long *p = map + BIT_WORD(start); const unsigned int size = start + len; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); while (len - bits_to_clear >= 0) { *p &= ~mask_to_clear; len -= bits_to_clear; bits_to_clear = BITS_PER_LONG; mask_to_clear = ~0UL; p++; } if (len) { mask_to_clear &= BITMAP_LAST_WORD_MASK(size); *p &= ~mask_to_clear; } } EXPORT_SYMBOL(__bitmap_clear); /** * bitmap_find_next_zero_area_off - find a contiguous aligned zero area * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @align_mask: Alignment mask for zero area * @align_offset: Alignment offset for zero area. * * The @align_mask should be one less than a power of 2; the effect is that * the bit offset of all zero areas this function finds plus @align_offset * is multiple of that power of 2. */ unsigned long bitmap_find_next_zero_area_off(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, unsigned long align_mask, unsigned long align_offset) { unsigned long index, end, i; again: index = find_next_zero_bit(map, size, start); /* Align allocation */ index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset; end = index + nr; if (end > size) return end; i = find_next_bit(map, end, index); if (i < end) { start = i + 1; goto again; } return index; } EXPORT_SYMBOL(bitmap_find_next_zero_area_off); /** * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap * @buf: pointer to a bitmap * @pos: a bit position in @buf (0 <= @pos < @nbits) * @nbits: number of valid bit positions in @buf * * Map the bit at position @pos in @buf (of length @nbits) to the * ordinal of which set bit it is. If it is not set or if @pos * is not a valid bit position, map to -1. * * If for example, just bits 4 through 7 are set in @buf, then @pos * values 4 through 7 will get mapped to 0 through 3, respectively, * and other @pos values will get mapped to -1. When @pos value 7 * gets mapped to (returns) @ord value 3 in this example, that means * that bit 7 is the 3rd (starting with 0th) set bit in @buf. * * The bit positions 0 through @bits are valid positions in @buf. 
*/ static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits) { if (pos >= nbits || !test_bit(pos, buf)) return -1; return bitmap_weight(buf, pos); } /** * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap * @dst: remapped result * @src: subset to be remapped * @old: defines domain of map * @new: defines range of map * @nbits: number of bits in each of these bitmaps * * Let @old and @new define a mapping of bit positions, such that * whatever position is held by the n-th set bit in @old is mapped * to the n-th set bit in @new. In the more general case, allowing * for the possibility that the weight 'w' of @new is less than the * weight of @old, map the position of the n-th set bit in @old to * the position of the m-th set bit in @new, where m == n % w. * * If either of the @old and @new bitmaps are empty, or if @src and * @dst point to the same location, then this routine copies @src * to @dst. * * The positions of unset bits in @old are mapped to themselves * (the identity map). * * Apply the above specified mapping to @src, placing the result in * @dst, clearing any bits previously set in @dst. * * For example, lets say that @old has bits 4 through 7 set, and * @new has bits 12 through 15 set. This defines the mapping of bit * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other * bit positions unchanged. So if say @src comes into this routine * with bits 1, 5 and 7 set, then @dst should leave with bits 1, * 13 and 15 set. */ void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits) { unsigned int oldbit, w; if (dst == src) /* following doesn't handle inplace remaps */ return; bitmap_zero(dst, nbits); w = bitmap_weight(new, nbits); for_each_set_bit(oldbit, src, nbits) { int n = bitmap_pos_to_ord(old, oldbit, nbits); if (n < 0 || w == 0) set_bit(oldbit, dst); /* identity map */ else set_bit(find_nth_bit(new, nbits, n % w), dst); } } EXPORT_SYMBOL(bitmap_remap); /** * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit * @oldbit: bit position to be mapped * @old: defines domain of map * @new: defines range of map * @bits: number of bits in each of these bitmaps * * Let @old and @new define a mapping of bit positions, such that * whatever position is held by the n-th set bit in @old is mapped * to the n-th set bit in @new. In the more general case, allowing * for the possibility that the weight 'w' of @new is less than the * weight of @old, map the position of the n-th set bit in @old to * the position of the m-th set bit in @new, where m == n % w. * * The positions of unset bits in @old are mapped to themselves * (the identity map). * * Apply the above specified mapping to bit position @oldbit, returning * the new bit position. * * For example, lets say that @old has bits 4 through 7 set, and * @new has bits 12 through 15 set. This defines the mapping of bit * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other * bit positions unchanged. So if say @oldbit is 5, then this routine * returns 13. 
*/ int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits) { int w = bitmap_weight(new, bits); int n = bitmap_pos_to_ord(old, oldbit, bits); if (n < 0 || w == 0) return oldbit; else return find_nth_bit(new, bits, n % w); } EXPORT_SYMBOL(bitmap_bitremap); #ifdef CONFIG_NUMA /** * bitmap_onto - translate one bitmap relative to another * @dst: resulting translated bitmap * @orig: original untranslated bitmap * @relmap: bitmap relative to which translated * @bits: number of bits in each of these bitmaps * * Set the n-th bit of @dst iff there exists some m such that the * n-th bit of @relmap is set, the m-th bit of @orig is set, and * the n-th bit of @relmap is also the m-th _set_ bit of @relmap. * (If you understood the previous sentence the first time your * read it, you're overqualified for your current job.) * * In other words, @orig is mapped onto (surjectively) @dst, * using the map { <n, m> | the n-th bit of @relmap is the * m-th set bit of @relmap }. * * Any set bits in @orig above bit number W, where W is the * weight of (number of set bits in) @relmap are mapped nowhere. * In particular, if for all bits m set in @orig, m >= W, then * @dst will end up empty. In situations where the possibility * of such an empty result is not desired, one way to avoid it is * to use the bitmap_fold() operator, below, to first fold the * @orig bitmap over itself so that all its set bits x are in the * range 0 <= x < W. The bitmap_fold() operator does this by * setting the bit (m % W) in @dst, for each bit (m) set in @orig. * * Example [1] for bitmap_onto(): * Let's say @relmap has bits 30-39 set, and @orig has bits * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine, * @dst will have bits 31, 33, 35, 37 and 39 set. * * When bit 0 is set in @orig, it means turn on the bit in * @dst corresponding to whatever is the first bit (if any) * that is turned on in @relmap. Since bit 0 was off in the * above example, we leave off that bit (bit 30) in @dst. * * When bit 1 is set in @orig (as in the above example), it * means turn on the bit in @dst corresponding to whatever * is the second bit that is turned on in @relmap. The second * bit in @relmap that was turned on in the above example was * bit 31, so we turned on bit 31 in @dst. * * Similarly, we turned on bits 33, 35, 37 and 39 in @dst, * because they were the 4th, 6th, 8th and 10th set bits * set in @relmap, and the 4th, 6th, 8th and 10th bits of * @orig (i.e. bits 3, 5, 7 and 9) were also set. * * When bit 11 is set in @orig, it means turn on the bit in * @dst corresponding to whatever is the twelfth bit that is * turned on in @relmap. In the above example, there were * only ten bits turned on in @relmap (30..39), so that bit * 11 was set in @orig had no affect on @dst. * * Example [2] for bitmap_fold() + bitmap_onto(): * Let's say @relmap has these ten bits set:: * * 40 41 42 43 45 48 53 61 74 95 * * (for the curious, that's 40 plus the first ten terms of the * Fibonacci sequence.) * * Further lets say we use the following code, invoking * bitmap_fold() then bitmap_onto, as suggested above to * avoid the possibility of an empty @dst result:: * * unsigned long *tmp; // a temporary bitmap's bits * * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits); * bitmap_onto(dst, tmp, relmap, bits); * * Then this table shows what various values of @dst would be, for * various @orig's. I list the zero-based positions of each set bit. 
* The tmp column shows the intermediate result, as computed by * using bitmap_fold() to fold the @orig bitmap modulo ten * (the weight of @relmap): * * =============== ============== ================= * @orig tmp @dst * 0 0 40 * 1 1 41 * 9 9 95 * 10 0 40 [#f1]_ * 1 3 5 7 1 3 5 7 41 43 48 61 * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45 * 0 9 18 27 0 9 8 7 40 61 74 95 * 0 10 20 30 0 40 * 0 11 22 33 0 1 2 3 40 41 42 43 * 0 12 24 36 0 2 4 6 40 42 45 53 * 78 102 211 1 2 8 41 42 74 [#f1]_ * =============== ============== ================= * * .. [#f1] * * For these marked lines, if we hadn't first done bitmap_fold() * into tmp, then the @dst result would have been empty. * * If either of @orig or @relmap is empty (no set bits), then @dst * will be returned empty. * * If (as explained above) the only set bits in @orig are in positions * m where m >= W, (where W is the weight of @relmap) then @dst will * once again be returned empty. * * All bits in @dst not set by the above rule are cleared. */ void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits) { unsigned int n, m; /* same meaning as in above comment */ if (dst == orig) /* following doesn't handle inplace mappings */ return; bitmap_zero(dst, bits); /* * The following code is a more efficient, but less * obvious, equivalent to the loop: * for (m = 0; m < bitmap_weight(relmap, bits); m++) { * n = find_nth_bit(orig, bits, m); * if (test_bit(m, orig)) * set_bit(n, dst); * } */ m = 0; for_each_set_bit(n, relmap, bits) { /* m == bitmap_pos_to_ord(relmap, n, bits) */ if (test_bit(m, orig)) set_bit(n, dst); m++; } } /** * bitmap_fold - fold larger bitmap into smaller, modulo specified size * @dst: resulting smaller bitmap * @orig: original larger bitmap * @sz: specified size * @nbits: number of bits in each of these bitmaps * * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. * Clear all other bits in @dst. See further the comment and * Example [2] for bitmap_onto() for why and how to use this. 
*/ void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits) { unsigned int oldbit; if (dst == orig) /* following doesn't handle inplace mappings */ return; bitmap_zero(dst, nbits); for_each_set_bit(oldbit, orig, nbits) set_bit(oldbit % sz, dst); } #endif /* CONFIG_NUMA */ unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags) { return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags); } EXPORT_SYMBOL(bitmap_alloc); unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags) { return bitmap_alloc(nbits, flags | __GFP_ZERO); } EXPORT_SYMBOL(bitmap_zalloc); unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node) { return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags, node); } EXPORT_SYMBOL(bitmap_alloc_node); unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node) { return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node); } EXPORT_SYMBOL(bitmap_zalloc_node); void bitmap_free(const unsigned long *bitmap) { kfree(bitmap); } EXPORT_SYMBOL(bitmap_free); static void devm_bitmap_free(void *data) { unsigned long *bitmap = data; bitmap_free(bitmap); } unsigned long *devm_bitmap_alloc(struct device *dev, unsigned int nbits, gfp_t flags) { unsigned long *bitmap; int ret; bitmap = bitmap_alloc(nbits, flags); if (!bitmap) return NULL; ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap); if (ret) return NULL; return bitmap; } EXPORT_SYMBOL_GPL(devm_bitmap_alloc); unsigned long *devm_bitmap_zalloc(struct device *dev, unsigned int nbits, gfp_t flags) { return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO); } EXPORT_SYMBOL_GPL(devm_bitmap_zalloc); #if BITS_PER_LONG == 64 /** * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap * @bitmap: array of unsigned longs, the destination bitmap * @buf: array of u32 (in host byte order), the source bitmap * @nbits: number of bits in @bitmap */ void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits) { unsigned int i, halfwords; halfwords = DIV_ROUND_UP(nbits, 32); for (i = 0; i < halfwords; i++) { bitmap[i/2] = (unsigned long) buf[i]; if (++i < halfwords) bitmap[i/2] |= ((unsigned long) buf[i]) << 32; } /* Clear tail bits in last word beyond nbits. */ if (nbits % BITS_PER_LONG) bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); } EXPORT_SYMBOL(bitmap_from_arr32); /** * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits * @buf: array of u32 (in host byte order), the dest bitmap * @bitmap: array of unsigned longs, the source bitmap * @nbits: number of bits in @bitmap */ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits) { unsigned int i, halfwords; halfwords = DIV_ROUND_UP(nbits, 32); for (i = 0; i < halfwords; i++) { buf[i] = (u32) (bitmap[i/2] & UINT_MAX); if (++i < halfwords) buf[i] = (u32) (bitmap[i/2] >> 32); } /* Clear tail bits in last element of array beyond nbits. 
*/ if (nbits % BITS_PER_LONG) buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31)); } EXPORT_SYMBOL(bitmap_to_arr32); #endif #if BITS_PER_LONG == 32 /** * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap * @bitmap: array of unsigned longs, the destination bitmap * @buf: array of u64 (in host byte order), the source bitmap * @nbits: number of bits in @bitmap */ void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits) { int n; for (n = nbits; n > 0; n -= 64) { u64 val = *buf++; *bitmap++ = val; if (n > 32) *bitmap++ = val >> 32; } /* * Clear tail bits in the last word beyond nbits. * * Negative index is OK because here we point to the word next * to the last word of the bitmap, except for nbits == 0, which * is tested implicitly. */ if (nbits % BITS_PER_LONG) bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits); } EXPORT_SYMBOL(bitmap_from_arr64); /** * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits * @buf: array of u64 (in host byte order), the dest bitmap * @bitmap: array of unsigned longs, the source bitmap * @nbits: number of bits in @bitmap */ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits) { const unsigned long *end = bitmap + BITS_TO_LONGS(nbits); while (bitmap < end) { *buf = *bitmap++; if (bitmap < end) *buf |= (u64)(*bitmap++) << 32; buf++; } /* Clear tail bits in the last element of array beyond nbits. */ if (nbits % 64) buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0); } EXPORT_SYMBOL(bitmap_to_arr64); #endif
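The exported helpers above are normally reached through the inline wrappers declared in include/linux/bitmap.h (bitmap_zalloc(), bitmap_set(), bitmap_weight(), bitmap_free(), ...). The following is a minimal, illustrative module sketch, not part of lib/bitmap.c, showing how those wrappers compose and how the "don't care" tail bits described in the DOC comment are masked out of scalar results; the module name and the choice of 100 bits are arbitrary assumptions made for the example.

/* Illustrative sketch only (hypothetical module), not part of lib/bitmap.c. */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/bitmap.h>

#define DEMO_NBITS	100	/* deliberately not a multiple of BITS_PER_LONG */

static unsigned long *demo_map;

static int __init bitmap_demo_init(void)
{
	demo_map = bitmap_zalloc(DEMO_NBITS, GFP_KERNEL);
	if (!demo_map)
		return -ENOMEM;

	/* Set bits 10..19 of the 100-bit map. */
	bitmap_set(demo_map, 10, 10);

	/*
	 * bitmap_weight() applies BITMAP_LAST_WORD_MASK() to the final,
	 * partially used word, so the unused tail bits never affect the
	 * result: this prints 10.
	 */
	pr_info("demo bitmap weight: %u\n", bitmap_weight(demo_map, DEMO_NBITS));

	return 0;
}

static void __exit bitmap_demo_exit(void)
{
	bitmap_free(demo_map);
}

module_init(bitmap_demo_init);
module_exit(bitmap_demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical bitmap API usage sketch");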
// SPDX-License-Identifier: GPL-2.0-only /* * HIDPP protocol for Logitech receivers * * Copyright (c) 2011 Logitech (c) * Copyright (c) 2012-2013 Google (c) * Copyright (c) 2013-2014 Red Hat Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/kfifo.h> #include <linux/input/mt.h> #include <linux/workqueue.h> #include <linux/atomic.h> #include <linux/fixp-arith.h> #include <asm/unaligned.h> #include "usbhid/usbhid.h" #include "hid-ids.h" MODULE_DESCRIPTION("Support for Logitech devices relying on the HID++ specification"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>"); MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>"); static bool disable_tap_to_click; module_param(disable_tap_to_click, bool, 0644); MODULE_PARM_DESC(disable_tap_to_click, "Disable Tap-To-Click mode reporting for touchpads (only on the K400 currently)."); /* Define a non-zero software ID to identify our own requests */ #define LINUX_KERNEL_SW_ID 0x01 #define REPORT_ID_HIDPP_SHORT 0x10 #define REPORT_ID_HIDPP_LONG 0x11 #define REPORT_ID_HIDPP_VERY_LONG 0x12 #define HIDPP_REPORT_SHORT_LENGTH 7 #define HIDPP_REPORT_LONG_LENGTH 20 #define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64 #define HIDPP_REPORT_SHORT_SUPPORTED BIT(0) #define HIDPP_REPORT_LONG_SUPPORTED BIT(1) #define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2) #define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03 #define HIDPP_SUB_ID_ROLLER 0x05 #define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06 #define HIDPP_SUB_ID_USER_IFACE_EVENT 0x08 #define HIDPP_USER_IFACE_EVENT_ENCRYPTION_KEY_LOST BIT(5) #define HIDPP_QUIRK_CLASS_WTP BIT(0) #define HIDPP_QUIRK_CLASS_M560 BIT(1) #define HIDPP_QUIRK_CLASS_K400 BIT(2) #define HIDPP_QUIRK_CLASS_G920 BIT(3) #define HIDPP_QUIRK_CLASS_K750 BIT(4) /* bits 2..20 are reserved for classes */ /* #define HIDPP_QUIRK_CONNECT_EVENTS BIT(21) disabled */ #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22) #define HIDPP_QUIRK_DELAYED_INIT BIT(23) #define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24) #define HIDPP_QUIRK_HIDPP_WHEELS BIT(25) #define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(26) #define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(27) #define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(28) #define HIDPP_QUIRK_WIRELESS_STATUS BIT(29) /* These
are just aliases for now */ #define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS #define HIDPP_QUIRK_KBD_ZOOM_WHEEL HIDPP_QUIRK_HIDPP_WHEELS /* Convenience constant to check for any high-res support. */ #define HIDPP_CAPABILITY_HI_RES_SCROLL (HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL | \ HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \ HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL) #define HIDPP_CAPABILITY_HIDPP10_BATTERY BIT(0) #define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1) #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2) #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3) #define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4) #define HIDPP_CAPABILITY_BATTERY_PERCENTAGE BIT(5) #define HIDPP_CAPABILITY_UNIFIED_BATTERY BIT(6) #define HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL BIT(7) #define HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL BIT(8) #define HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL BIT(9) #define HIDPP_CAPABILITY_ADC_MEASUREMENT BIT(10) #define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) /* * There are two hidpp protocols in use, the first version hidpp10 is known * as register access protocol or RAP, the second version hidpp20 is known as * feature access protocol or FAP * * Most older devices (including the Unifying usb receiver) use the RAP protocol * where as most newer devices use the FAP protocol. Both protocols are * compatible with the underlying transport, which could be usb, Unifiying, or * bluetooth. The message lengths are defined by the hid vendor specific report * descriptor for the HIDPP_SHORT report type (total message lenth 7 bytes) and * the HIDPP_LONG report type (total message length 20 bytes) * * The RAP protocol uses both report types, whereas the FAP only uses HIDPP_LONG * messages. The Unifying receiver itself responds to RAP messages (device index * is 0xFF for the receiver), and all messages (short or long) with a device * index between 1 and 6 are passed untouched to the corresponding paired * Unifying device. * * The paired device can be RAP or FAP, it will receive the message untouched * from the Unifiying receiver. */ struct fap { u8 feature_index; u8 funcindex_clientid; u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U]; }; struct rap { u8 sub_id; u8 reg_address; u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U]; }; struct hidpp_report { u8 report_id; u8 device_index; union { struct fap fap; struct rap rap; u8 rawbytes[sizeof(struct fap)]; }; } __packed; struct hidpp_battery { u8 feature_index; u8 solar_feature_index; u8 voltage_feature_index; u8 adc_measurement_feature_index; struct power_supply_desc desc; struct power_supply *ps; char name[64]; int status; int capacity; int level; int voltage; int charge_type; bool online; u8 supported_levels_1004; }; /** * struct hidpp_scroll_counter - Utility class for processing high-resolution * scroll events. * @dev: the input device for which events should be reported. * @wheel_multiplier: the scalar multiplier to be applied to each wheel event * @remainder: counts the number of high-resolution units moved since the last * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should * only be used by class methods. 
* @direction: direction of last movement (1 or -1) * @last_time: last event time, used to reset remainder after inactivity */ struct hidpp_scroll_counter { int wheel_multiplier; int remainder; int direction; unsigned long long last_time; }; struct hidpp_device { struct hid_device *hid_dev; struct input_dev *input; struct mutex send_mutex; void *send_receive_buf; char *name; /* will never be NULL and should not be freed */ wait_queue_head_t wait; int very_long_report_length; bool answer_available; u8 protocol_major; u8 protocol_minor; void *private_data; struct work_struct work; struct kfifo delayed_work_fifo; struct input_dev *delayed_input; unsigned long quirks; unsigned long capabilities; u8 supported_reports; struct hidpp_battery battery; struct hidpp_scroll_counter vertical_wheel_counter; u8 wireless_feature_index; bool connected_once; }; /* HID++ 1.0 error codes */ #define HIDPP_ERROR 0x8f #define HIDPP_ERROR_SUCCESS 0x00 #define HIDPP_ERROR_INVALID_SUBID 0x01 #define HIDPP_ERROR_INVALID_ADRESS 0x02 #define HIDPP_ERROR_INVALID_VALUE 0x03 #define HIDPP_ERROR_CONNECT_FAIL 0x04 #define HIDPP_ERROR_TOO_MANY_DEVICES 0x05 #define HIDPP_ERROR_ALREADY_EXISTS 0x06 #define HIDPP_ERROR_BUSY 0x07 #define HIDPP_ERROR_UNKNOWN_DEVICE 0x08 #define HIDPP_ERROR_RESOURCE_ERROR 0x09 #define HIDPP_ERROR_REQUEST_UNAVAILABLE 0x0a #define HIDPP_ERROR_INVALID_PARAM_VALUE 0x0b #define HIDPP_ERROR_WRONG_PIN_CODE 0x0c /* HID++ 2.0 error codes */ #define HIDPP20_ERROR_NO_ERROR 0x00 #define HIDPP20_ERROR_UNKNOWN 0x01 #define HIDPP20_ERROR_INVALID_ARGS 0x02 #define HIDPP20_ERROR_OUT_OF_RANGE 0x03 #define HIDPP20_ERROR_HW_ERROR 0x04 #define HIDPP20_ERROR_NOT_ALLOWED 0x05 #define HIDPP20_ERROR_INVALID_FEATURE_INDEX 0x06 #define HIDPP20_ERROR_INVALID_FUNCTION_ID 0x07 #define HIDPP20_ERROR_BUSY 0x08 #define HIDPP20_ERROR_UNSUPPORTED 0x09 #define HIDPP20_ERROR 0xff static int __hidpp_send_report(struct hid_device *hdev, struct hidpp_report *hidpp_report) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); int fields_count, ret; switch (hidpp_report->report_id) { case REPORT_ID_HIDPP_SHORT: fields_count = HIDPP_REPORT_SHORT_LENGTH; break; case REPORT_ID_HIDPP_LONG: fields_count = HIDPP_REPORT_LONG_LENGTH; break; case REPORT_ID_HIDPP_VERY_LONG: fields_count = hidpp->very_long_report_length; break; default: return -ENODEV; } /* * set the device_index as the receiver, it will be overwritten by * hid_hw_request if needed */ hidpp_report->device_index = 0xff; if (hidpp->quirks & HIDPP_QUIRK_FORCE_OUTPUT_REPORTS) { ret = hid_hw_output_report(hdev, (u8 *)hidpp_report, fields_count); } else { ret = hid_hw_raw_request(hdev, hidpp_report->report_id, (u8 *)hidpp_report, fields_count, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); } return ret == fields_count ? 0 : -1; } /* * Effectively send the message to the device, waiting for its answer. 
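 *
 * As an illustration (a sketch assembled from the defines above, not a
 * transcript of the HID++ specification), a short RAP read of the battery
 * status register on paired device 1 would be framed as:
 *
 *   0x10  0x01  0x81  0x07  0x00 0x00 0x00
 *    |     |     |     |     `-- rap.params (3 bytes)
 *    |     |     |     `-------- rap.reg_address (HIDPP_REG_BATTERY_STATUS)
 *    |     |     `-------------- rap.sub_id (HIDPP_GET_REGISTER)
 *    |     `-------------------- device_index (1..6 paired, 0xff receiver)
 *    `-------------------------- REPORT_ID_HIDPP_SHORT
 *
 * A successful answer echoes the sub_id and register address; an error reply
 * instead carries sub_id HIDPP_ERROR (0x8f) with the error code in
 * rap.params[1], which the checks below turn into a positive return value.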
* * Must be called with hidpp->send_mutex locked * * Same return protocol than hidpp_send_message_sync(): * - success on 0 * - negative error means transport error * - positive value means protocol error */ static int __do_hidpp_send_message_sync(struct hidpp_device *hidpp, struct hidpp_report *message, struct hidpp_report *response) { int ret; __must_hold(&hidpp->send_mutex); hidpp->send_receive_buf = response; hidpp->answer_available = false; /* * So that we can later validate the answer when it arrives * in hidpp_raw_event */ *response = *message; ret = __hidpp_send_report(hidpp->hid_dev, message); if (ret) { dbg_hid("__hidpp_send_report returned err: %d\n", ret); memset(response, 0, sizeof(struct hidpp_report)); return ret; } if (!wait_event_timeout(hidpp->wait, hidpp->answer_available, 5*HZ)) { dbg_hid("%s:timeout waiting for response\n", __func__); memset(response, 0, sizeof(struct hidpp_report)); return -ETIMEDOUT; } if (response->report_id == REPORT_ID_HIDPP_SHORT && response->rap.sub_id == HIDPP_ERROR) { ret = response->rap.params[1]; dbg_hid("%s:got hidpp error %02X\n", __func__, ret); return ret; } if ((response->report_id == REPORT_ID_HIDPP_LONG || response->report_id == REPORT_ID_HIDPP_VERY_LONG) && response->fap.feature_index == HIDPP20_ERROR) { ret = response->fap.params[1]; dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret); return ret; } return 0; } /* * hidpp_send_message_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. */ static int hidpp_send_message_sync(struct hidpp_device *hidpp, struct hidpp_report *message, struct hidpp_report *response) { int ret; int max_retries = 3; mutex_lock(&hidpp->send_mutex); do { ret = __do_hidpp_send_message_sync(hidpp, message, response); if (ret != HIDPP20_ERROR_BUSY) break; dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret); } while (--max_retries); mutex_unlock(&hidpp->send_mutex); return ret; } /* * hidpp_send_fap_command_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. */ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp, u8 feat_index, u8 funcindex_clientid, u8 *params, int param_count, struct hidpp_report *response) { struct hidpp_report *message; int ret; if (param_count > sizeof(message->fap.params)) { hid_dbg(hidpp->hid_dev, "Invalid number of parameters passed to command (%d != %llu)\n", param_count, (unsigned long long) sizeof(message->fap.params)); return -EINVAL; } message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL); if (!message) return -ENOMEM; if (param_count > (HIDPP_REPORT_LONG_LENGTH - 4)) message->report_id = REPORT_ID_HIDPP_VERY_LONG; else message->report_id = REPORT_ID_HIDPP_LONG; message->fap.feature_index = feat_index; message->fap.funcindex_clientid = funcindex_clientid | LINUX_KERNEL_SW_ID; memcpy(&message->fap.params, params, param_count); ret = hidpp_send_message_sync(hidpp, message, response); kfree(message); return ret; } /* * hidpp_send_rap_command_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. 
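 *
 * A minimal usage sketch (mirroring what hidpp10_set_register() does further
 * down, error handling omitted): read register HIDPP_REG_ENABLE_REPORTS and
 * write back a modified copy:
 *
 *   struct hidpp_report response;
 *   u8 params[3] = { 0 };
 *
 *   hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT,
 *                               HIDPP_GET_REGISTER, HIDPP_REG_ENABLE_REPORTS,
 *                               NULL, 0, &response);
 *   memcpy(params, response.rap.params, 3);
 *   params[0] |= HIDPP_ENABLE_BAT_REPORT;
 *   hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT,
 *                               HIDPP_SET_REGISTER, HIDPP_REG_ENABLE_REPORTS,
 *                               params, 3, &response);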
*/ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev, u8 report_id, u8 sub_id, u8 reg_address, u8 *params, int param_count, struct hidpp_report *response) { struct hidpp_report *message; int ret, max_count; /* Send as long report if short reports are not supported. */ if (report_id == REPORT_ID_HIDPP_SHORT && !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED)) report_id = REPORT_ID_HIDPP_LONG; switch (report_id) { case REPORT_ID_HIDPP_SHORT: max_count = HIDPP_REPORT_SHORT_LENGTH - 4; break; case REPORT_ID_HIDPP_LONG: max_count = HIDPP_REPORT_LONG_LENGTH - 4; break; case REPORT_ID_HIDPP_VERY_LONG: max_count = hidpp_dev->very_long_report_length - 4; break; default: return -EINVAL; } if (param_count > max_count) return -EINVAL; message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL); if (!message) return -ENOMEM; message->report_id = report_id; message->rap.sub_id = sub_id; message->rap.reg_address = reg_address; memcpy(&message->rap.params, params, param_count); ret = hidpp_send_message_sync(hidpp_dev, message, response); kfree(message); return ret; } static inline bool hidpp_match_answer(struct hidpp_report *question, struct hidpp_report *answer) { return (answer->fap.feature_index == question->fap.feature_index) && (answer->fap.funcindex_clientid == question->fap.funcindex_clientid); } static inline bool hidpp_match_error(struct hidpp_report *question, struct hidpp_report *answer) { return ((answer->rap.sub_id == HIDPP_ERROR) || (answer->fap.feature_index == HIDPP20_ERROR)) && (answer->fap.funcindex_clientid == question->fap.feature_index) && (answer->fap.params[0] == question->fap.funcindex_clientid); } static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp, struct hidpp_report *report) { return (hidpp->wireless_feature_index && (report->fap.feature_index == hidpp->wireless_feature_index)) || ((report->report_id == REPORT_ID_HIDPP_SHORT) && (report->rap.sub_id == 0x41)); } /* * hidpp_prefix_name() prefixes the current given name with "Logitech ". */ static void hidpp_prefix_name(char **name, int name_length) { #define PREFIX_LENGTH 9 /* "Logitech " */ int new_length; char *new_name; if (name_length > PREFIX_LENGTH && strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0) /* The prefix has is already in the name */ return; new_length = PREFIX_LENGTH + name_length; new_name = kzalloc(new_length, GFP_KERNEL); if (!new_name) return; snprintf(new_name, new_length, "Logitech %s", *name); kfree(*name); *name = new_name; } /* * Updates the USB wireless_status based on whether the headset * is turned on and reachable. */ static void hidpp_update_usb_wireless_status(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; struct usb_interface *intf; if (!(hidpp->quirks & HIDPP_QUIRK_WIRELESS_STATUS)) return; if (!hid_is_usb(hdev)) return; intf = to_usb_interface(hdev->dev.parent); usb_set_wireless_status(intf, hidpp->battery.online ? USB_WIRELESS_STATUS_CONNECTED : USB_WIRELESS_STATUS_DISCONNECTED); } /** * hidpp_scroll_counter_handle_scroll() - Send high- and low-resolution scroll * events given a high-resolution wheel * movement. * @input_dev: Pointer to the input device * @counter: a hid_scroll_counter struct describing the wheel. * @hi_res_value: the movement of the wheel, in the mouse's high-resolution * units. * * Given a high-resolution movement, this function converts the movement into * fractions of 120 and emits high-resolution scroll events for the input * device. 
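 * (A worked example: with a wheel_multiplier of 8, a movement of 2 native
 * counts is scaled to 2 * 120 / 8 = 30 high-resolution units, and a
 * low-resolution REL_WHEEL click is only emitted once the accumulated
 * remainder reaches 60, i.e. half a detent.)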
It also uses the multiplier from &struct hid_scroll_counter to * emit low-resolution scroll events when appropriate for * backwards-compatibility with userspace input libraries. */ static void hidpp_scroll_counter_handle_scroll(struct input_dev *input_dev, struct hidpp_scroll_counter *counter, int hi_res_value) { int low_res_value, remainder, direction; unsigned long long now, previous; hi_res_value = hi_res_value * 120/counter->wheel_multiplier; input_report_rel(input_dev, REL_WHEEL_HI_RES, hi_res_value); remainder = counter->remainder; direction = hi_res_value > 0 ? 1 : -1; now = sched_clock(); previous = counter->last_time; counter->last_time = now; /* * Reset the remainder after a period of inactivity or when the * direction changes. This prevents the REL_WHEEL emulation point * from sliding for devices that don't always provide the same * number of movements per detent. */ if (now - previous > 1000000000 || direction != counter->direction) remainder = 0; counter->direction = direction; remainder += hi_res_value; /* Some wheels will rest 7/8ths of a detent from the previous detent * after slow movement, so we want the threshold for low-res events to * be in the middle between two detents (e.g. after 4/8ths) as * opposed to on the detents themselves (8/8ths). */ if (abs(remainder) >= 60) { /* Add (or subtract) 1 because we want to trigger when the wheel * is half-way to the next detent (i.e. scroll 1 detent after a * 1/2 detent movement, 2 detents after a 1 1/2 detent movement, * etc.). */ low_res_value = remainder / 120; if (low_res_value == 0) low_res_value = (hi_res_value > 0 ? 1 : -1); input_report_rel(input_dev, REL_WHEEL, low_res_value); remainder -= low_res_value * 120; } counter->remainder = remainder; } /* -------------------------------------------------------------------------- */ /* HIDP++ 1.0 commands */ /* -------------------------------------------------------------------------- */ #define HIDPP_SET_REGISTER 0x80 #define HIDPP_GET_REGISTER 0x81 #define HIDPP_SET_LONG_REGISTER 0x82 #define HIDPP_GET_LONG_REGISTER 0x83 /** * hidpp10_set_register - Modify a HID++ 1.0 register. * @hidpp_dev: the device to set the register on. * @register_address: the address of the register to modify. * @byte: the byte of the register to modify. Should be less than 3. * @mask: mask of the bits to modify * @value: new values for the bits in mask * Return: 0 if successful, otherwise a negative error code. 
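 *
 * Example (a sketch; this is exactly how hidpp10_enable_battery_reporting()
 * below uses it), setting HIDPP_ENABLE_BAT_REPORT in byte 0 of
 * HIDPP_REG_ENABLE_REPORTS without touching the other bits:
 *
 *   ret = hidpp10_set_register(hidpp_dev, HIDPP_REG_ENABLE_REPORTS, 0,
 *                              HIDPP_ENABLE_BAT_REPORT,
 *                              HIDPP_ENABLE_BAT_REPORT);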
*/ static int hidpp10_set_register(struct hidpp_device *hidpp_dev, u8 register_address, u8 byte, u8 mask, u8 value) { struct hidpp_report response; int ret; u8 params[3] = { 0 }; ret = hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, register_address, NULL, 0, &response); if (ret) return ret; memcpy(params, response.rap.params, 3); params[byte] &= ~mask; params[byte] |= value & mask; return hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_SET_REGISTER, register_address, params, 3, &response); } #define HIDPP_REG_ENABLE_REPORTS 0x00 #define HIDPP_ENABLE_CONSUMER_REPORT BIT(0) #define HIDPP_ENABLE_WHEEL_REPORT BIT(2) #define HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT BIT(3) #define HIDPP_ENABLE_BAT_REPORT BIT(4) #define HIDPP_ENABLE_HWHEEL_REPORT BIT(5) static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev) { return hidpp10_set_register(hidpp_dev, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_BAT_REPORT, HIDPP_ENABLE_BAT_REPORT); } #define HIDPP_REG_FEATURES 0x01 #define HIDPP_ENABLE_SPECIAL_BUTTON_FUNC BIT(1) #define HIDPP_ENABLE_FAST_SCROLL BIT(6) /* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */ static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev) { return hidpp10_set_register(hidpp_dev, HIDPP_REG_FEATURES, 0, HIDPP_ENABLE_FAST_SCROLL, HIDPP_ENABLE_FAST_SCROLL); } #define HIDPP_REG_BATTERY_STATUS 0x07 static int hidpp10_battery_status_map_level(u8 param) { int level; switch (param) { case 1 ... 2: level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; break; case 3 ... 4: level = POWER_SUPPLY_CAPACITY_LEVEL_LOW; break; case 5 ... 6: level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; break; case 7: level = POWER_SUPPLY_CAPACITY_LEVEL_HIGH; break; default: level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; } return level; } static int hidpp10_battery_status_map_status(u8 param) { int status; switch (param) { case 0x00: /* discharging (in use) */ status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x21: /* (standard) charging */ case 0x24: /* fast charging */ case 0x25: /* slow charging */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x26: /* topping charge */ case 0x22: /* charge complete */ status = POWER_SUPPLY_STATUS_FULL; break; case 0x20: /* unknown */ status = POWER_SUPPLY_STATUS_UNKNOWN; break; /* * 0x01...0x1F = reserved (not charging) * 0x23 = charging error * 0x27..0xff = reserved */ default: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp10_query_battery_status(struct hidpp_device *hidpp) { struct hidpp_report response; int ret, status; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, HIDPP_REG_BATTERY_STATUS, NULL, 0, &response); if (ret) return ret; hidpp->battery.level = hidpp10_battery_status_map_level(response.rap.params[0]); status = hidpp10_battery_status_map_status(response.rap.params[1]); hidpp->battery.status = status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; return 0; } #define HIDPP_REG_BATTERY_MILEAGE 0x0D static int hidpp10_battery_mileage_map_status(u8 param) { int status; switch (param >> 6) { case 0x00: /* discharging (in use) */ status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x01: /* charging */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x02: /* charge complete */ status = POWER_SUPPLY_STATUS_FULL; break; /* * 0x03 = charging error */ default: 
status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp10_query_battery_mileage(struct hidpp_device *hidpp) { struct hidpp_report response; int ret, status; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, HIDPP_REG_BATTERY_MILEAGE, NULL, 0, &response); if (ret) return ret; hidpp->battery.capacity = response.rap.params[0]; status = hidpp10_battery_mileage_map_status(response.rap.params[2]); hidpp->battery.status = status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; return 0; } static int hidpp10_battery_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int status, capacity, level; bool changed; if (report->report_id != REPORT_ID_HIDPP_SHORT) return 0; switch (report->rap.sub_id) { case HIDPP_REG_BATTERY_STATUS: capacity = hidpp->battery.capacity; level = hidpp10_battery_status_map_level(report->rawbytes[1]); status = hidpp10_battery_status_map_status(report->rawbytes[2]); break; case HIDPP_REG_BATTERY_MILEAGE: capacity = report->rap.params[0]; level = hidpp->battery.level; status = hidpp10_battery_mileage_map_status(report->rawbytes[3]); break; default: return 0; } changed = capacity != hidpp->battery.capacity || level != hidpp->battery.level || status != hidpp->battery.status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; if (changed) { hidpp->battery.level = level; hidpp->battery.status = status; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } #define HIDPP_REG_PAIRING_INFORMATION 0xB5 #define HIDPP_EXTENDED_PAIRING 0x30 #define HIDPP_DEVICE_NAME 0x40 static char *hidpp_unifying_get_name(struct hidpp_device *hidpp_dev) { struct hidpp_report response; int ret; u8 params[1] = { HIDPP_DEVICE_NAME }; char *name; int len; ret = hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_GET_LONG_REGISTER, HIDPP_REG_PAIRING_INFORMATION, params, 1, &response); if (ret) return NULL; len = response.rap.params[1]; if (2 + len > sizeof(response.rap.params)) return NULL; if (len < 4) /* logitech devices are usually at least Xddd */ return NULL; name = kzalloc(len + 1, GFP_KERNEL); if (!name) return NULL; memcpy(name, &response.rap.params[2], len); /* include the terminating '\0' */ hidpp_prefix_name(&name, len + 1); return name; } static int hidpp_unifying_get_serial(struct hidpp_device *hidpp, u32 *serial) { struct hidpp_report response; int ret; u8 params[1] = { HIDPP_EXTENDED_PAIRING }; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_LONG_REGISTER, HIDPP_REG_PAIRING_INFORMATION, params, 1, &response); if (ret) return ret; /* * We don't care about LE or BE, we will output it as a string * with %4phD, so we need to keep the order. 
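 * For example, a reply carrying { 0x4f, 0x1c, 0x2b, 0x33 } in params[1..4]
 * ends up in hdev->uniq as "4f-1c-2b-33" (dash-separated hex, as produced by
 * the %phD family of printk specifiers).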
*/ *serial = *((u32 *)&response.rap.params[1]); return 0; } static int hidpp_unifying_init(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; const char *name; u32 serial; int ret; ret = hidpp_unifying_get_serial(hidpp, &serial); if (ret) return ret; snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial); dbg_hid("HID++ Unifying: Got serial: %s\n", hdev->uniq); name = hidpp_unifying_get_name(hidpp); if (!name) return -EIO; snprintf(hdev->name, sizeof(hdev->name), "%s", name); dbg_hid("HID++ Unifying: Got name: %s\n", name); kfree(name); return 0; } /* -------------------------------------------------------------------------- */ /* 0x0000: Root */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_ROOT 0x0000 #define HIDPP_PAGE_ROOT_IDX 0x00 #define CMD_ROOT_GET_FEATURE 0x00 #define CMD_ROOT_GET_PROTOCOL_VERSION 0x10 static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature, u8 *feature_index, u8 *feature_type) { struct hidpp_report response; int ret; u8 params[2] = { feature >> 8, feature & 0x00FF }; ret = hidpp_send_fap_command_sync(hidpp, HIDPP_PAGE_ROOT_IDX, CMD_ROOT_GET_FEATURE, params, 2, &response); if (ret) return ret; if (response.fap.params[0] == 0) return -ENOENT; *feature_index = response.fap.params[0]; *feature_type = response.fap.params[1]; return ret; } static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) { const u8 ping_byte = 0x5a; u8 ping_data[3] = { 0, 0, ping_byte }; struct hidpp_report response; int ret; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_PAGE_ROOT_IDX, CMD_ROOT_GET_PROTOCOL_VERSION | LINUX_KERNEL_SW_ID, ping_data, sizeof(ping_data), &response); if (ret == HIDPP_ERROR_INVALID_SUBID) { hidpp->protocol_major = 1; hidpp->protocol_minor = 0; goto print_version; } /* the device might not be connected */ if (ret == HIDPP_ERROR_RESOURCE_ERROR) return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; if (response.rap.params[2] != ping_byte) { hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n", __func__, response.rap.params[2], ping_byte); return -EPROTO; } hidpp->protocol_major = response.rap.params[0]; hidpp->protocol_minor = response.rap.params[1]; print_version: if (!hidpp->connected_once) { hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n", hidpp->protocol_major, hidpp->protocol_minor); hidpp->connected_once = true; } else hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n", hidpp->protocol_major, hidpp->protocol_minor); return 0; } /* -------------------------------------------------------------------------- */ /* 0x0003: Device Information */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_DEVICE_INFORMATION 0x0003 #define CMD_GET_DEVICE_INFO 0x00 static int hidpp_get_serial(struct hidpp_device *hidpp, u32 *serial) { struct hidpp_report response; u8 feature_type; u8 feature_index; int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_DEVICE_INFORMATION, &feature_index, &feature_type); if (ret) return ret; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_GET_DEVICE_INFO, NULL, 0, &response); if (ret) return ret; /* See hidpp_unifying_get_serial() */ *serial = *((u32 *)&response.rap.params[1]); return 0; } static int hidpp_serial_init(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; u32 serial; int ret; ret = hidpp_get_serial(hidpp, 
&serial); if (ret) return ret; snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial); dbg_hid("HID++ DeviceInformation: Got serial: %s\n", hdev->uniq); return 0; } /* -------------------------------------------------------------------------- */ /* 0x0005: GetDeviceNameType */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_GET_DEVICE_NAME_TYPE 0x0005 #define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x00 #define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x10 #define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x20 static int hidpp_devicenametype_get_count(struct hidpp_device *hidpp, u8 feature_index, u8 *nameLength) { struct hidpp_report response; int ret; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_GET_DEVICE_NAME_TYPE_GET_COUNT, NULL, 0, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; *nameLength = response.fap.params[0]; return ret; } static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp, u8 feature_index, u8 char_index, char *device_name, int len_buf) { struct hidpp_report response; int ret, i; int count; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME, &char_index, 1, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; switch (response.report_id) { case REPORT_ID_HIDPP_VERY_LONG: count = hidpp->very_long_report_length - 4; break; case REPORT_ID_HIDPP_LONG: count = HIDPP_REPORT_LONG_LENGTH - 4; break; case REPORT_ID_HIDPP_SHORT: count = HIDPP_REPORT_SHORT_LENGTH - 4; break; default: return -EPROTO; } if (len_buf < count) count = len_buf; for (i = 0; i < count; i++) device_name[i] = response.fap.params[i]; return count; } static char *hidpp_get_device_name(struct hidpp_device *hidpp) { u8 feature_type; u8 feature_index; u8 __name_length; char *name; unsigned index = 0; int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_GET_DEVICE_NAME_TYPE, &feature_index, &feature_type); if (ret) return NULL; ret = hidpp_devicenametype_get_count(hidpp, feature_index, &__name_length); if (ret) return NULL; name = kzalloc(__name_length + 1, GFP_KERNEL); if (!name) return NULL; while (index < __name_length) { ret = hidpp_devicenametype_get_device_name(hidpp, feature_index, index, name + index, __name_length - index); if (ret <= 0) { kfree(name); return NULL; } index += ret; } /* include the terminating '\0' */ hidpp_prefix_name(&name, __name_length + 1); return name; } /* -------------------------------------------------------------------------- */ /* 0x1000: Battery level status */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_BATTERY_LEVEL_STATUS 0x1000 #define CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS 0x00 #define CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_CAPABILITY 0x10 #define EVENT_BATTERY_LEVEL_STATUS_BROADCAST 0x00 #define FLAG_BATTERY_LEVEL_DISABLE_OSD BIT(0) #define FLAG_BATTERY_LEVEL_MILEAGE BIT(1) #define FLAG_BATTERY_LEVEL_RECHARGEABLE BIT(2) static int hidpp_map_battery_level(int capacity) { if (capacity < 11) return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; /* * The spec says this should be < 31 but some devices report 30 * with brand new batteries and Windows reports 30 as "Good". 
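 * With the thresholds used here, a reported 8% maps to CRITICAL, 25% to LOW,
 * 50% to NORMAL and 85% to FULL (illustrative values only).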
*/ else if (capacity < 30) return POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (capacity < 81) return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; return POWER_SUPPLY_CAPACITY_LEVEL_FULL; } static int hidpp20_batterylevel_map_status_capacity(u8 data[3], int *capacity, int *next_capacity, int *level) { int status; *capacity = data[0]; *next_capacity = data[1]; *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; /* When discharging, we can rely on the device reported capacity. * For all other states the device reports 0 (unknown). */ switch (data[2]) { case 0: /* discharging (in use) */ status = POWER_SUPPLY_STATUS_DISCHARGING; *level = hidpp_map_battery_level(*capacity); break; case 1: /* recharging */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 2: /* charge in final stage */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 3: /* charge complete */ status = POWER_SUPPLY_STATUS_FULL; *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; *capacity = 100; break; case 4: /* recharging below optimal speed */ status = POWER_SUPPLY_STATUS_CHARGING; break; /* 5 = invalid battery type 6 = thermal error 7 = other charging error */ default: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp, u8 feature_index, int *status, int *capacity, int *next_capacity, int *level) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS, NULL, 0, &response); /* Ignore these intermittent errors */ if (ret == HIDPP_ERROR_RESOURCE_ERROR) return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; *status = hidpp20_batterylevel_map_status_capacity(params, capacity, next_capacity, level); return 0; } static int hidpp20_batterylevel_get_battery_info(struct hidpp_device *hidpp, u8 feature_index) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; unsigned int level_count, flags; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_CAPABILITY, NULL, 0, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; level_count = params[0]; flags = params[1]; if (level_count < 10 || !(flags & FLAG_BATTERY_LEVEL_MILEAGE)) hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS; else hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_MILEAGE; return 0; } static int hidpp20_query_battery_info_1000(struct hidpp_device *hidpp) { u8 feature_type; int ret; int status, capacity, next_capacity, level; if (hidpp->battery.feature_index == 0xff) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_LEVEL_STATUS, &hidpp->battery.feature_index, &feature_type); if (ret) return ret; } ret = hidpp20_batterylevel_get_battery_capacity(hidpp, hidpp->battery.feature_index, &status, &capacity, &next_capacity, &level); if (ret) return ret; ret = hidpp20_batterylevel_get_battery_info(hidpp, hidpp->battery.feature_index); if (ret) return ret; hidpp->battery.status = status; hidpp->battery.capacity = capacity; hidpp->battery.level = level; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; return 0; } static int hidpp20_battery_event_1000(struct hidpp_device *hidpp, u8 *data, int size) { 
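	/*
	 * Illustrative sketch (not a capture from real hardware): a broadcast
	 * with fap.params = { 50, 45, 0 } means 50% capacity now, 45% at the
	 * next level change, and a "discharging" state, which the mapping
	 * helper above turns into POWER_SUPPLY_STATUS_DISCHARGING with a
	 * NORMAL capacity level.
	 */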
struct hidpp_report *report = (struct hidpp_report *)data; int status, capacity, next_capacity, level; bool changed; if (report->fap.feature_index != hidpp->battery.feature_index || report->fap.funcindex_clientid != EVENT_BATTERY_LEVEL_STATUS_BROADCAST) return 0; status = hidpp20_batterylevel_map_status_capacity(report->fap.params, &capacity, &next_capacity, &level); /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; changed = capacity != hidpp->battery.capacity || level != hidpp->battery.level || status != hidpp->battery.status; if (changed) { hidpp->battery.level = level; hidpp->battery.capacity = capacity; hidpp->battery.status = status; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } /* -------------------------------------------------------------------------- */ /* 0x1001: Battery voltage */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001 #define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00 #define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00 static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, int *level, int *charge_type) { int status; long flags = (long) data[2]; *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; if (flags & 0x80) switch (flags & 0x07) { case 0: status = POWER_SUPPLY_STATUS_CHARGING; break; case 1: status = POWER_SUPPLY_STATUS_FULL; *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; break; case 2: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; default: status = POWER_SUPPLY_STATUS_UNKNOWN; break; } else status = POWER_SUPPLY_STATUS_DISCHARGING; *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; if (test_bit(3, &flags)) { *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST; } if (test_bit(4, &flags)) { *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; } if (test_bit(5, &flags)) { *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; } *voltage = get_unaligned_be16(data); return status; } static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp, u8 feature_index, int *status, int *voltage, int *level, int *charge_type) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE, NULL, 0, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE; *status = hidpp20_battery_map_status_voltage(params, voltage, level, charge_type); return 0; } static int hidpp20_map_battery_capacity(struct hid_device *hid_dev, int voltage) { /* NB: This voltage curve doesn't necessarily map perfectly to all * devices that implement the BATTERY_VOLTAGE feature. This is because * there are a few devices that use different battery technology. 
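 * The table below is scanned from the highest voltage down and the capacity
 * is reported as (100 - index) for the first entry the measured voltage
 * still reaches; a 3800 mV reading, for instance, matches index 53 and is
 * reported as 47%.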
*/ static const int voltages[100] = { 4186, 4156, 4143, 4133, 4122, 4113, 4103, 4094, 4086, 4075, 4067, 4059, 4051, 4043, 4035, 4027, 4019, 4011, 4003, 3997, 3989, 3983, 3976, 3969, 3961, 3955, 3949, 3942, 3935, 3929, 3922, 3916, 3909, 3902, 3896, 3890, 3883, 3877, 3870, 3865, 3859, 3853, 3848, 3842, 3837, 3833, 3828, 3824, 3819, 3815, 3811, 3808, 3804, 3800, 3797, 3793, 3790, 3787, 3784, 3781, 3778, 3775, 3772, 3770, 3767, 3764, 3762, 3759, 3757, 3754, 3751, 3748, 3744, 3741, 3737, 3734, 3730, 3726, 3724, 3720, 3717, 3714, 3710, 3706, 3702, 3697, 3693, 3688, 3683, 3677, 3671, 3666, 3662, 3658, 3654, 3646, 3633, 3612, 3579, 3537 }; int i; if (unlikely(voltage < 3500 || voltage >= 5000)) hid_warn_once(hid_dev, "%s: possibly using the wrong voltage curve\n", __func__); for (i = 0; i < ARRAY_SIZE(voltages); i++) { if (voltage >= voltages[i]) return ARRAY_SIZE(voltages) - i; } return 0; } static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp) { u8 feature_type; int ret; int status, voltage, level, charge_type; if (hidpp->battery.voltage_feature_index == 0xff) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE, &hidpp->battery.voltage_feature_index, &feature_type); if (ret) return ret; } ret = hidpp20_battery_get_battery_voltage(hidpp, hidpp->battery.voltage_feature_index, &status, &voltage, &level, &charge_type); if (ret) return ret; hidpp->battery.status = status; hidpp->battery.voltage = voltage; hidpp->battery.capacity = hidpp20_map_battery_capacity(hidpp->hid_dev, voltage); hidpp->battery.level = level; hidpp->battery.charge_type = charge_type; hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; return 0; } static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int status, voltage, level, charge_type; if (report->fap.feature_index != hidpp->battery.voltage_feature_index || report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST) return 0; status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage, &level, &charge_type); hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) { hidpp->battery.voltage = voltage; hidpp->battery.capacity = hidpp20_map_battery_capacity(hidpp->hid_dev, voltage); hidpp->battery.status = status; hidpp->battery.level = level; hidpp->battery.charge_type = charge_type; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } /* -------------------------------------------------------------------------- */ /* 0x1004: Unified battery */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_UNIFIED_BATTERY 0x1004 #define CMD_UNIFIED_BATTERY_GET_CAPABILITIES 0x00 #define CMD_UNIFIED_BATTERY_GET_STATUS 0x10 #define EVENT_UNIFIED_BATTERY_STATUS_EVENT 0x00 #define FLAG_UNIFIED_BATTERY_LEVEL_CRITICAL BIT(0) #define FLAG_UNIFIED_BATTERY_LEVEL_LOW BIT(1) #define FLAG_UNIFIED_BATTERY_LEVEL_GOOD BIT(2) #define FLAG_UNIFIED_BATTERY_LEVEL_FULL BIT(3) #define FLAG_UNIFIED_BATTERY_FLAGS_RECHARGEABLE BIT(0) #define FLAG_UNIFIED_BATTERY_FLAGS_STATE_OF_CHARGE BIT(1) static int hidpp20_unifiedbattery_get_capabilities(struct hidpp_device *hidpp, u8 feature_index) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS || hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE) 
{ /* we have already set the device capabilities, so let's skip */ return 0; } ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_UNIFIED_BATTERY_GET_CAPABILITIES, NULL, 0, &response); /* Ignore these intermittent errors */ if (ret == HIDPP_ERROR_RESOURCE_ERROR) return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; /* * If the device supports state of charge (battery percentage) we won't * export the battery level information. there are 4 possible battery * levels and they all are optional, this means that the device might * not support any of them, we are just better off with the battery * percentage. */ if (params[1] & FLAG_UNIFIED_BATTERY_FLAGS_STATE_OF_CHARGE) { hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_PERCENTAGE; hidpp->battery.supported_levels_1004 = 0; } else { hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS; hidpp->battery.supported_levels_1004 = params[0]; } return 0; } static int hidpp20_unifiedbattery_map_status(struct hidpp_device *hidpp, u8 charging_status, u8 external_power_status) { int status; switch (charging_status) { case 0: /* discharging */ status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 1: /* charging */ case 2: /* charging slow */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 3: /* complete */ status = POWER_SUPPLY_STATUS_FULL; break; case 4: /* error */ status = POWER_SUPPLY_STATUS_NOT_CHARGING; hid_info(hidpp->hid_dev, "%s: charging error", hidpp->name); break; default: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp20_unifiedbattery_map_level(struct hidpp_device *hidpp, u8 battery_level) { /* cler unsupported level bits */ battery_level &= hidpp->battery.supported_levels_1004; if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_FULL) return POWER_SUPPLY_CAPACITY_LEVEL_FULL; else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_GOOD) return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_LOW) return POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_CRITICAL) return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; return POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; } static int hidpp20_unifiedbattery_get_status(struct hidpp_device *hidpp, u8 feature_index, u8 *state_of_charge, int *status, int *level) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_UNIFIED_BATTERY_GET_STATUS, NULL, 0, &response); /* Ignore these intermittent errors */ if (ret == HIDPP_ERROR_RESOURCE_ERROR) return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; *state_of_charge = params[0]; *status = hidpp20_unifiedbattery_map_status(hidpp, params[2], params[3]); *level = hidpp20_unifiedbattery_map_level(hidpp, params[1]); return 0; } static int hidpp20_query_battery_info_1004(struct hidpp_device *hidpp) { u8 feature_type; int ret; u8 state_of_charge; int status, level; if (hidpp->battery.feature_index == 0xff) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_UNIFIED_BATTERY, &hidpp->battery.feature_index, &feature_type); if (ret) return ret; } ret = hidpp20_unifiedbattery_get_capabilities(hidpp, hidpp->battery.feature_index); if (ret) return ret; ret = hidpp20_unifiedbattery_get_status(hidpp, hidpp->battery.feature_index, &state_of_charge, &status, &level); if (ret) return ret; hidpp->capabilities |= 
HIDPP_CAPABILITY_UNIFIED_BATTERY; hidpp->battery.capacity = state_of_charge; hidpp->battery.status = status; hidpp->battery.level = level; hidpp->battery.online = true; return 0; } static int hidpp20_battery_event_1004(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; u8 *params = (u8 *)report->fap.params; int state_of_charge, status, level; bool changed; if (report->fap.feature_index != hidpp->battery.feature_index || report->fap.funcindex_clientid != EVENT_UNIFIED_BATTERY_STATUS_EVENT) return 0; state_of_charge = params[0]; status = hidpp20_unifiedbattery_map_status(hidpp, params[2], params[3]); level = hidpp20_unifiedbattery_map_level(hidpp, params[1]); changed = status != hidpp->battery.status || (state_of_charge != hidpp->battery.capacity && hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE) || (level != hidpp->battery.level && hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS); if (changed) { hidpp->battery.capacity = state_of_charge; hidpp->battery.status = status; hidpp->battery.level = level; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } /* -------------------------------------------------------------------------- */ /* Battery feature helpers */ /* -------------------------------------------------------------------------- */ static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_SERIAL_NUMBER, 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */ 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */ 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */ }; static int hidpp_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct hidpp_device *hidpp = power_supply_get_drvdata(psy); int ret = 0; switch(psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = hidpp->battery.status; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = hidpp->battery.capacity; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: val->intval = hidpp->battery.level; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = hidpp->battery.online; break; case POWER_SUPPLY_PROP_MODEL_NAME: if (!strncmp(hidpp->name, "Logitech ", 9)) val->strval = hidpp->name + 9; else val->strval = hidpp->name; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = "Logitech"; break; case POWER_SUPPLY_PROP_SERIAL_NUMBER: val->strval = hidpp->hid_dev->uniq; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: /* hardware reports voltage in mV. 
sysfs expects uV */ val->intval = hidpp->battery.voltage * 1000; break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = hidpp->battery.charge_type; break; default: ret = -EINVAL; break; } return ret; } /* -------------------------------------------------------------------------- */ /* 0x1d4b: Wireless device status */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index) { u8 feature_type; int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_WIRELESS_DEVICE_STATUS, feature_index, &feature_type); return ret; } /* -------------------------------------------------------------------------- */ /* 0x1f20: ADC measurement */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_ADC_MEASUREMENT 0x1f20 #define CMD_ADC_MEASUREMENT_GET_ADC_MEASUREMENT 0x00 #define EVENT_ADC_MEASUREMENT_STATUS_BROADCAST 0x00 static int hidpp20_map_adc_measurement_1f20_capacity(struct hid_device *hid_dev, int voltage) { /* NB: This voltage curve doesn't necessarily map perfectly to all * devices that implement the ADC_MEASUREMENT feature. This is because * there are a few devices that use different battery technology. * * Adapted from: * https://github.com/Sapd/HeadsetControl/blob/acd972be0468e039b93aae81221f20a54d2d60f7/src/devices/logitech_g633_g933_935.c#L44-L52 */ static const int voltages[100] = { 4030, 4024, 4018, 4011, 4003, 3994, 3985, 3975, 3963, 3951, 3937, 3922, 3907, 3893, 3880, 3868, 3857, 3846, 3837, 3828, 3820, 3812, 3805, 3798, 3791, 3785, 3779, 3773, 3768, 3762, 3757, 3752, 3747, 3742, 3738, 3733, 3729, 3724, 3720, 3716, 3712, 3708, 3704, 3700, 3696, 3692, 3688, 3685, 3681, 3677, 3674, 3670, 3667, 3663, 3660, 3657, 3653, 3650, 3646, 3643, 3640, 3637, 3633, 3630, 3627, 3624, 3620, 3617, 3614, 3611, 3608, 3604, 3601, 3598, 3595, 3592, 3589, 3585, 3582, 3579, 3576, 3573, 3569, 3566, 3563, 3560, 3556, 3553, 3550, 3546, 3543, 3539, 3536, 3532, 3529, 3525, 3499, 3466, 3433, 3399, }; int i; if (voltage == 0) return 0; if (unlikely(voltage < 3400 || voltage >= 5000)) hid_warn_once(hid_dev, "%s: possibly using the wrong voltage curve\n", __func__); for (i = 0; i < ARRAY_SIZE(voltages); i++) { if (voltage >= voltages[i]) return ARRAY_SIZE(voltages) - i; } return 0; } static int hidpp20_map_adc_measurement_1f20(u8 data[3], int *voltage) { int status; u8 flags; flags = data[2]; switch (flags) { case 0x01: status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x03: status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x07: status = POWER_SUPPLY_STATUS_FULL; break; case 0x0F: default: status = POWER_SUPPLY_STATUS_UNKNOWN; break; } *voltage = get_unaligned_be16(data); dbg_hid("Parsed 1f20 data as flag 0x%02x voltage %dmV\n", flags, *voltage); return status; } /* Return value is whether the device is online */ static bool hidpp20_get_adc_measurement_1f20(struct hidpp_device *hidpp, u8 feature_index, int *status, int *voltage) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; *status = POWER_SUPPLY_STATUS_UNKNOWN; *voltage = 0; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_ADC_MEASUREMENT_GET_ADC_MEASUREMENT, NULL, 0, &response); if (ret > 0) { hid_dbg(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return false; } *status = hidpp20_map_adc_measurement_1f20(params, voltage); return true; } static int 
hidpp20_query_adc_measurement_info_1f20(struct hidpp_device *hidpp) { u8 feature_type; if (hidpp->battery.adc_measurement_feature_index == 0xff) { int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_ADC_MEASUREMENT, &hidpp->battery.adc_measurement_feature_index, &feature_type); if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_ADC_MEASUREMENT; } hidpp->battery.online = hidpp20_get_adc_measurement_1f20(hidpp, hidpp->battery.adc_measurement_feature_index, &hidpp->battery.status, &hidpp->battery.voltage); hidpp->battery.capacity = hidpp20_map_adc_measurement_1f20_capacity(hidpp->hid_dev, hidpp->battery.voltage); hidpp_update_usb_wireless_status(hidpp); return 0; } static int hidpp20_adc_measurement_event_1f20(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int status, voltage; if (report->fap.feature_index != hidpp->battery.adc_measurement_feature_index || report->fap.funcindex_clientid != EVENT_ADC_MEASUREMENT_STATUS_BROADCAST) return 0; status = hidpp20_map_adc_measurement_1f20(report->fap.params, &voltage); hidpp->battery.online = status != POWER_SUPPLY_STATUS_UNKNOWN; if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) { hidpp->battery.status = status; hidpp->battery.voltage = voltage; hidpp->battery.capacity = hidpp20_map_adc_measurement_1f20_capacity(hidpp->hid_dev, voltage); if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); hidpp_update_usb_wireless_status(hidpp); } return 0; } /* -------------------------------------------------------------------------- */ /* 0x2120: Hi-resolution scrolling */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120 #define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10 static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp, bool enabled, u8 *multiplier) { u8 feature_index; u8 feature_type; int ret; u8 params[1]; struct hidpp_report response; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HI_RESOLUTION_SCROLLING, &feature_index, &feature_type); if (ret) return ret; params[0] = enabled ? 
BIT(0) : 0; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE, params, sizeof(params), &response); if (ret) return ret; *multiplier = response.fap.params[1]; return 0; } /* -------------------------------------------------------------------------- */ /* 0x2121: HiRes Wheel */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_HIRES_WHEEL 0x2121 #define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00 #define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20 static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp, u8 *multiplier) { u8 feature_index; u8 feature_type; int ret; struct hidpp_report response; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL, &feature_index, &feature_type); if (ret) goto return_default; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY, NULL, 0, &response); if (ret) goto return_default; *multiplier = response.fap.params[0]; return 0; return_default: hid_warn(hidpp->hid_dev, "Couldn't get wheel multiplier (error %d)\n", ret); return ret; } static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert, bool high_resolution, bool use_hidpp) { u8 feature_index; u8 feature_type; int ret; u8 params[1]; struct hidpp_report response; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL, &feature_index, &feature_type); if (ret) return ret; params[0] = (invert ? BIT(2) : 0) | (high_resolution ? BIT(1) : 0) | (use_hidpp ? BIT(0) : 0); return hidpp_send_fap_command_sync(hidpp, feature_index, CMD_HIRES_WHEEL_SET_WHEEL_MODE, params, sizeof(params), &response); } /* -------------------------------------------------------------------------- */ /* 0x4301: Solar Keyboard */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_SOLAR_KEYBOARD 0x4301 #define CMD_SOLAR_SET_LIGHT_MEASURE 0x00 #define EVENT_SOLAR_BATTERY_BROADCAST 0x00 #define EVENT_SOLAR_BATTERY_LIGHT_MEASURE 0x10 #define EVENT_SOLAR_CHECK_LIGHT_BUTTON 0x20 static int hidpp_solar_request_battery_event(struct hidpp_device *hidpp) { struct hidpp_report response; u8 params[2] = { 1, 1 }; u8 feature_type; int ret; if (hidpp->battery.feature_index == 0xff) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_SOLAR_KEYBOARD, &hidpp->battery.solar_feature_index, &feature_type); if (ret) return ret; } ret = hidpp_send_fap_command_sync(hidpp, hidpp->battery.solar_feature_index, CMD_SOLAR_SET_LIGHT_MEASURE, params, 2, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_MILEAGE; return 0; } static int hidpp_solar_battery_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int capacity, lux, status; u8 function; function = report->fap.funcindex_clientid; if (report->fap.feature_index != hidpp->battery.solar_feature_index || !(function == EVENT_SOLAR_BATTERY_BROADCAST || function == EVENT_SOLAR_BATTERY_LIGHT_MEASURE || function == EVENT_SOLAR_CHECK_LIGHT_BUTTON)) return 0; capacity = report->fap.params[0]; switch (function) { case EVENT_SOLAR_BATTERY_LIGHT_MEASURE: lux = (report->fap.params[1] << 8) | report->fap.params[2]; if (lux > 200) status = POWER_SUPPLY_STATUS_CHARGING; else status = POWER_SUPPLY_STATUS_DISCHARGING; break; case EVENT_SOLAR_CHECK_LIGHT_BUTTON: default: if (capacity < hidpp->battery.capacity) 
status = POWER_SUPPLY_STATUS_DISCHARGING; else status = POWER_SUPPLY_STATUS_CHARGING; } if (capacity == 100) status = POWER_SUPPLY_STATUS_FULL; hidpp->battery.online = true; if (capacity != hidpp->battery.capacity || status != hidpp->battery.status) { hidpp->battery.capacity = capacity; hidpp->battery.status = status; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } /* -------------------------------------------------------------------------- */ /* 0x6010: Touchpad FW items */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_TOUCHPAD_FW_ITEMS 0x6010 #define CMD_TOUCHPAD_FW_ITEMS_SET 0x10 struct hidpp_touchpad_fw_items { uint8_t presence; uint8_t desired_state; uint8_t state; uint8_t persistent; }; /* * send a set state command to the device by reading the current items->state * field. items is then filled with the current state. */ static int hidpp_touchpad_fw_items_set(struct hidpp_device *hidpp, u8 feature_index, struct hidpp_touchpad_fw_items *items) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_TOUCHPAD_FW_ITEMS_SET, &items->state, 1, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; items->presence = params[0]; items->desired_state = params[1]; items->state = params[2]; items->persistent = params[3]; return 0; } /* -------------------------------------------------------------------------- */ /* 0x6100: TouchPadRawXY */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_TOUCHPAD_RAW_XY 0x6100 #define CMD_TOUCHPAD_GET_RAW_INFO 0x00 #define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x20 #define EVENT_TOUCHPAD_RAW_XY 0x00 #define TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT 0x01 #define TOUCHPAD_RAW_XY_ORIGIN_UPPER_LEFT 0x03 struct hidpp_touchpad_raw_info { u16 x_size; u16 y_size; u8 z_range; u8 area_range; u8 timestamp_unit; u8 maxcontacts; u8 origin; u16 res; }; struct hidpp_touchpad_raw_xy_finger { u8 contact_type; u8 contact_status; u16 x; u16 y; u8 z; u8 area; u8 finger_id; }; struct hidpp_touchpad_raw_xy { u16 timestamp; struct hidpp_touchpad_raw_xy_finger fingers[2]; u8 spurious_flag; u8 end_of_frame; u8 finger_count; u8 button; }; static int hidpp_touchpad_get_raw_info(struct hidpp_device *hidpp, u8 feature_index, struct hidpp_touchpad_raw_info *raw_info) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_TOUCHPAD_GET_RAW_INFO, NULL, 0, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; raw_info->x_size = get_unaligned_be16(&params[0]); raw_info->y_size = get_unaligned_be16(&params[2]); raw_info->z_range = params[4]; raw_info->area_range = params[5]; raw_info->maxcontacts = params[7]; raw_info->origin = params[8]; /* res is given in unit per inch */ raw_info->res = get_unaligned_be16(&params[13]) * 2 / 51; return ret; } static int hidpp_touchpad_set_raw_report_state(struct hidpp_device *hidpp_dev, u8 feature_index, bool send_raw_reports, bool sensor_enhanced_settings) { struct hidpp_report response; /* * Params: * bit 0 - enable raw * bit 1 - 16bit Z, no area * bit 2 - enhanced sensitivity * bit 3 - width, height (4 bits each) instead of area * bit 4 - send raw + gestures (degrades smoothness) * 
remaining bits - reserved */ u8 params = send_raw_reports | (sensor_enhanced_settings << 2); return hidpp_send_fap_command_sync(hidpp_dev, feature_index, CMD_TOUCHPAD_SET_RAW_REPORT_STATE, &params, 1, &response); } static void hidpp_touchpad_touch_event(u8 *data, struct hidpp_touchpad_raw_xy_finger *finger) { u8 x_m = data[0] << 2; u8 y_m = data[2] << 2; finger->x = x_m << 6 | data[1]; finger->y = y_m << 6 | data[3]; finger->contact_type = data[0] >> 6; finger->contact_status = data[2] >> 6; finger->z = data[4]; finger->area = data[5]; finger->finger_id = data[6] >> 4; } static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev, u8 *data, struct hidpp_touchpad_raw_xy *raw_xy) { memset(raw_xy, 0, sizeof(struct hidpp_touchpad_raw_xy)); raw_xy->end_of_frame = data[8] & 0x01; raw_xy->spurious_flag = (data[8] >> 1) & 0x01; raw_xy->finger_count = data[15] & 0x0f; raw_xy->button = (data[8] >> 2) & 0x01; if (raw_xy->finger_count) { hidpp_touchpad_touch_event(&data[2], &raw_xy->fingers[0]); hidpp_touchpad_touch_event(&data[9], &raw_xy->fingers[1]); } } /* -------------------------------------------------------------------------- */ /* 0x8123: Force feedback support */ /* -------------------------------------------------------------------------- */ #define HIDPP_FF_GET_INFO 0x01 #define HIDPP_FF_RESET_ALL 0x11 #define HIDPP_FF_DOWNLOAD_EFFECT 0x21 #define HIDPP_FF_SET_EFFECT_STATE 0x31 #define HIDPP_FF_DESTROY_EFFECT 0x41 #define HIDPP_FF_GET_APERTURE 0x51 #define HIDPP_FF_SET_APERTURE 0x61 #define HIDPP_FF_GET_GLOBAL_GAINS 0x71 #define HIDPP_FF_SET_GLOBAL_GAINS 0x81 #define HIDPP_FF_EFFECT_STATE_GET 0x00 #define HIDPP_FF_EFFECT_STATE_STOP 0x01 #define HIDPP_FF_EFFECT_STATE_PLAY 0x02 #define HIDPP_FF_EFFECT_STATE_PAUSE 0x03 #define HIDPP_FF_EFFECT_CONSTANT 0x00 #define HIDPP_FF_EFFECT_PERIODIC_SINE 0x01 #define HIDPP_FF_EFFECT_PERIODIC_SQUARE 0x02 #define HIDPP_FF_EFFECT_PERIODIC_TRIANGLE 0x03 #define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP 0x04 #define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN 0x05 #define HIDPP_FF_EFFECT_SPRING 0x06 #define HIDPP_FF_EFFECT_DAMPER 0x07 #define HIDPP_FF_EFFECT_FRICTION 0x08 #define HIDPP_FF_EFFECT_INERTIA 0x09 #define HIDPP_FF_EFFECT_RAMP 0x0A #define HIDPP_FF_EFFECT_AUTOSTART 0x80 #define HIDPP_FF_EFFECTID_NONE -1 #define HIDPP_FF_EFFECTID_AUTOCENTER -2 #define HIDPP_AUTOCENTER_PARAMS_LENGTH 18 #define HIDPP_FF_MAX_PARAMS 20 #define HIDPP_FF_RESERVED_SLOTS 1 struct hidpp_ff_private_data { struct hidpp_device *hidpp; u8 feature_index; u8 version; u16 gain; s16 range; u8 slot_autocenter; u8 num_effects; int *effect_ids; struct workqueue_struct *wq; atomic_t workqueue_size; }; struct hidpp_ff_work_data { struct work_struct work; struct hidpp_ff_private_data *data; int effect_id; u8 command; u8 params[HIDPP_FF_MAX_PARAMS]; u8 size; }; static const signed short hidpp_ff_effects[] = { FF_CONSTANT, FF_PERIODIC, FF_SINE, FF_SQUARE, FF_SAW_UP, FF_SAW_DOWN, FF_TRIANGLE, FF_SPRING, FF_DAMPER, FF_AUTOCENTER, FF_GAIN, -1 }; static const signed short hidpp_ff_effects_v2[] = { FF_RAMP, FF_FRICTION, FF_INERTIA, -1 }; static const u8 HIDPP_FF_CONDITION_CMDS[] = { HIDPP_FF_EFFECT_SPRING, HIDPP_FF_EFFECT_FRICTION, HIDPP_FF_EFFECT_DAMPER, HIDPP_FF_EFFECT_INERTIA }; static const char *HIDPP_FF_CONDITION_NAMES[] = { "spring", "friction", "damper", "inertia" }; static u8 hidpp_ff_find_effect(struct hidpp_ff_private_data *data, int effect_id) { int i; for (i = 0; i < data->num_effects; i++) if (data->effect_ids[i] == effect_id) return i+1; return 0; } static void 
hidpp_ff_work_handler(struct work_struct *w) { struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work); struct hidpp_ff_private_data *data = wd->data; struct hidpp_report response; u8 slot; int ret; /* add slot number if needed */ switch (wd->effect_id) { case HIDPP_FF_EFFECTID_AUTOCENTER: wd->params[0] = data->slot_autocenter; break; case HIDPP_FF_EFFECTID_NONE: /* leave slot as zero */ break; default: /* find current slot for effect */ wd->params[0] = hidpp_ff_find_effect(data, wd->effect_id); break; } /* send command and wait for reply */ ret = hidpp_send_fap_command_sync(data->hidpp, data->feature_index, wd->command, wd->params, wd->size, &response); if (ret) { hid_err(data->hidpp->hid_dev, "Failed to send command to device!\n"); goto out; } /* parse return data */ switch (wd->command) { case HIDPP_FF_DOWNLOAD_EFFECT: slot = response.fap.params[0]; if (slot > 0 && slot <= data->num_effects) { if (wd->effect_id >= 0) /* regular effect uploaded */ data->effect_ids[slot-1] = wd->effect_id; else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER) /* autocenter spring uploaded */ data->slot_autocenter = slot; } break; case HIDPP_FF_DESTROY_EFFECT: if (wd->effect_id >= 0) /* regular effect destroyed */ data->effect_ids[wd->params[0]-1] = -1; else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER) /* autocenter spring destoyed */ data->slot_autocenter = 0; break; case HIDPP_FF_SET_GLOBAL_GAINS: data->gain = (wd->params[0] << 8) + wd->params[1]; break; case HIDPP_FF_SET_APERTURE: data->range = (wd->params[0] << 8) + wd->params[1]; break; default: /* no action needed */ break; } out: atomic_dec(&data->workqueue_size); kfree(wd); } static int hidpp_ff_queue_work(struct hidpp_ff_private_data *data, int effect_id, u8 command, u8 *params, u8 size) { struct hidpp_ff_work_data *wd = kzalloc(sizeof(*wd), GFP_KERNEL); int s; if (!wd) return -ENOMEM; INIT_WORK(&wd->work, hidpp_ff_work_handler); wd->data = data; wd->effect_id = effect_id; wd->command = command; wd->size = size; memcpy(wd->params, params, size); s = atomic_inc_return(&data->workqueue_size); queue_work(data->wq, &wd->work); /* warn about excessive queue size */ if (s >= 20 && s % 20 == 0) hid_warn(data->hidpp->hid_dev, "Force feedback command queue contains %d commands, causing substantial delays!", s); return 0; } static int hidpp_ff_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) { struct hidpp_ff_private_data *data = dev->ff->private; u8 params[20]; u8 size; int force; /* set common parameters */ params[2] = effect->replay.length >> 8; params[3] = effect->replay.length & 255; params[4] = effect->replay.delay >> 8; params[5] = effect->replay.delay & 255; switch (effect->type) { case FF_CONSTANT: force = (effect->u.constant.level * fixp_sin16((effect->direction * 360) >> 16)) >> 15; params[1] = HIDPP_FF_EFFECT_CONSTANT; params[6] = force >> 8; params[7] = force & 255; params[8] = effect->u.constant.envelope.attack_level >> 7; params[9] = effect->u.constant.envelope.attack_length >> 8; params[10] = effect->u.constant.envelope.attack_length & 255; params[11] = effect->u.constant.envelope.fade_level >> 7; params[12] = effect->u.constant.envelope.fade_length >> 8; params[13] = effect->u.constant.envelope.fade_length & 255; size = 14; dbg_hid("Uploading constant force level=%d in dir %d = %d\n", effect->u.constant.level, effect->direction, force); dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n", effect->u.constant.envelope.attack_level, 
effect->u.constant.envelope.attack_length, effect->u.constant.envelope.fade_level, effect->u.constant.envelope.fade_length); break; case FF_PERIODIC: { switch (effect->u.periodic.waveform) { case FF_SINE: params[1] = HIDPP_FF_EFFECT_PERIODIC_SINE; break; case FF_SQUARE: params[1] = HIDPP_FF_EFFECT_PERIODIC_SQUARE; break; case FF_SAW_UP: params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP; break; case FF_SAW_DOWN: params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN; break; case FF_TRIANGLE: params[1] = HIDPP_FF_EFFECT_PERIODIC_TRIANGLE; break; default: hid_err(data->hidpp->hid_dev, "Unexpected periodic waveform type %i!\n", effect->u.periodic.waveform); return -EINVAL; } force = (effect->u.periodic.magnitude * fixp_sin16((effect->direction * 360) >> 16)) >> 15; params[6] = effect->u.periodic.magnitude >> 8; params[7] = effect->u.periodic.magnitude & 255; params[8] = effect->u.periodic.offset >> 8; params[9] = effect->u.periodic.offset & 255; params[10] = effect->u.periodic.period >> 8; params[11] = effect->u.periodic.period & 255; params[12] = effect->u.periodic.phase >> 8; params[13] = effect->u.periodic.phase & 255; params[14] = effect->u.periodic.envelope.attack_level >> 7; params[15] = effect->u.periodic.envelope.attack_length >> 8; params[16] = effect->u.periodic.envelope.attack_length & 255; params[17] = effect->u.periodic.envelope.fade_level >> 7; params[18] = effect->u.periodic.envelope.fade_length >> 8; params[19] = effect->u.periodic.envelope.fade_length & 255; size = 20; dbg_hid("Uploading periodic force mag=%d/dir=%d, offset=%d, period=%d ms, phase=%d\n", effect->u.periodic.magnitude, effect->direction, effect->u.periodic.offset, effect->u.periodic.period, effect->u.periodic.phase); dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n", effect->u.periodic.envelope.attack_level, effect->u.periodic.envelope.attack_length, effect->u.periodic.envelope.fade_level, effect->u.periodic.envelope.fade_length); break; } case FF_RAMP: params[1] = HIDPP_FF_EFFECT_RAMP; force = (effect->u.ramp.start_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15; params[6] = force >> 8; params[7] = force & 255; force = (effect->u.ramp.end_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15; params[8] = force >> 8; params[9] = force & 255; params[10] = effect->u.ramp.envelope.attack_level >> 7; params[11] = effect->u.ramp.envelope.attack_length >> 8; params[12] = effect->u.ramp.envelope.attack_length & 255; params[13] = effect->u.ramp.envelope.fade_level >> 7; params[14] = effect->u.ramp.envelope.fade_length >> 8; params[15] = effect->u.ramp.envelope.fade_length & 255; size = 16; dbg_hid("Uploading ramp force level=%d -> %d in dir %d = %d\n", effect->u.ramp.start_level, effect->u.ramp.end_level, effect->direction, force); dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n", effect->u.ramp.envelope.attack_level, effect->u.ramp.envelope.attack_length, effect->u.ramp.envelope.fade_level, effect->u.ramp.envelope.fade_length); break; case FF_FRICTION: case FF_INERTIA: case FF_SPRING: case FF_DAMPER: params[1] = HIDPP_FF_CONDITION_CMDS[effect->type - FF_SPRING]; params[6] = effect->u.condition[0].left_saturation >> 9; params[7] = (effect->u.condition[0].left_saturation >> 1) & 255; params[8] = effect->u.condition[0].left_coeff >> 8; params[9] = effect->u.condition[0].left_coeff & 255; params[10] = effect->u.condition[0].deadband >> 9; params[11] = (effect->u.condition[0].deadband >> 1) & 255; params[12] = effect->u.condition[0].center >> 8; params[13] = effect->u.condition[0].center & 255; 
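	/*
	 * Encoding note (derived from the shifts above and below): saturation
	 * and deadband are 16-bit input values transmitted halved, i.e. as
	 * 15-bit big-endian quantities (value >> 9 gives the high byte,
	 * (value >> 1) & 255 the low byte), while the coefficients and the
	 * center are sent as plain 16-bit big-endian values.
	 */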
		params[14] = effect->u.condition[0].right_coeff >> 8;
		params[15] = effect->u.condition[0].right_coeff & 255;
		params[16] = effect->u.condition[0].right_saturation >> 9;
		params[17] = (effect->u.condition[0].right_saturation >> 1) & 255;
		size = 18;
		dbg_hid("Uploading %s force left coeff=%d, left sat=%d, right coeff=%d, right sat=%d\n",
			HIDPP_FF_CONDITION_NAMES[effect->type - FF_SPRING],
			effect->u.condition[0].left_coeff,
			effect->u.condition[0].left_saturation,
			effect->u.condition[0].right_coeff,
			effect->u.condition[0].right_saturation);
		dbg_hid(" deadband=%d, center=%d\n",
			effect->u.condition[0].deadband,
			effect->u.condition[0].center);
		break;
	default:
		hid_err(data->hidpp->hid_dev, "Unexpected force type %i!\n", effect->type);
		return -EINVAL;
	}

	return hidpp_ff_queue_work(data, effect->id, HIDPP_FF_DOWNLOAD_EFFECT, params, size);
}

static int hidpp_ff_playback(struct input_dev *dev, int effect_id, int value)
{
	struct hidpp_ff_private_data *data = dev->ff->private;
	u8 params[2];

	params[1] = value ? HIDPP_FF_EFFECT_STATE_PLAY : HIDPP_FF_EFFECT_STATE_STOP;

	dbg_hid("St%sing playback of effect %d.\n", value?"art":"opp", effect_id);

	return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_SET_EFFECT_STATE, params, ARRAY_SIZE(params));
}

static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
{
	struct hidpp_ff_private_data *data = dev->ff->private;
	u8 slot = 0;

	dbg_hid("Erasing effect %d.\n", effect_id);

	return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_DESTROY_EFFECT, &slot, 1);
}

static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
{
	struct hidpp_ff_private_data *data = dev->ff->private;
	u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH];

	dbg_hid("Setting autocenter to %d.\n", magnitude);

	/* start a standard spring effect */
	params[1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART;
	/* zero delay and duration */
	params[2] = params[3] = params[4] = params[5] = 0;
	/* set coeff to 25% of saturation */
	params[8] = params[14] = magnitude >> 11;
	params[9] = params[15] = (magnitude >> 3) & 255;
	params[6] = params[16] = magnitude >> 9;
	params[7] = params[17] = (magnitude >> 1) & 255;
	/* zero deadband and center */
	params[10] = params[11] = params[12] = params[13] = 0;

	hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_AUTOCENTER, HIDPP_FF_DOWNLOAD_EFFECT, params, ARRAY_SIZE(params));
}

static void hidpp_ff_set_gain(struct input_dev *dev, u16 gain)
{
	struct hidpp_ff_private_data *data = dev->ff->private;
	u8 params[4];

	dbg_hid("Setting gain to %d.\n", gain);

	params[0] = gain >> 8;
	params[1] = gain & 255;
	params[2] = 0; /* no boost */
	params[3] = 0;

	hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_NONE, HIDPP_FF_SET_GLOBAL_GAINS, params, ARRAY_SIZE(params));
}

static ssize_t hidpp_ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
	struct input_dev *idev = hidinput->input;
	struct hidpp_ff_private_data *data = idev->ff->private;

	return scnprintf(buf, PAGE_SIZE, "%u\n", data->range);
}

static ssize_t hidpp_ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct hid_device *hid = to_hid_device(dev);
	struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
	struct input_dev *idev = hidinput->input;
	struct hidpp_ff_private_data *data = idev->ff->private;
	u8 params[2];
	int range = simple_strtoul(buf, NULL, 10);

	range = clamp(range, 180, 900);

	params[0] = range >> 8;
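	/* The clamped range is queued below as a 16-bit big-endian value:
	 * high byte in params[0], low byte in params[1].
	 */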
params[1] = range & 0x00FF; hidpp_ff_queue_work(data, -1, HIDPP_FF_SET_APERTURE, params, ARRAY_SIZE(params)); return count; } static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp_ff_range_show, hidpp_ff_range_store); static void hidpp_ff_destroy(struct ff_device *ff) { struct hidpp_ff_private_data *data = ff->private; struct hid_device *hid = data->hidpp->hid_dev; hid_info(hid, "Unloading HID++ force feedback.\n"); device_remove_file(&hid->dev, &dev_attr_range); destroy_workqueue(data->wq); kfree(data->effect_ids); } static int hidpp_ff_init(struct hidpp_device *hidpp, struct hidpp_ff_private_data *data) { struct hid_device *hid = hidpp->hid_dev; struct hid_input *hidinput; struct input_dev *dev; struct usb_device_descriptor *udesc; u16 bcdDevice; struct ff_device *ff; int error, j, num_slots = data->num_effects; u8 version; if (!hid_is_usb(hid)) { hid_err(hid, "device is not USB\n"); return -ENODEV; } if (list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); return -ENODEV; } hidinput = list_entry(hid->inputs.next, struct hid_input, list); dev = hidinput->input; if (!dev) { hid_err(hid, "Struct input_dev not set!\n"); return -EINVAL; } /* Get firmware release */ udesc = &(hid_to_usb_dev(hid)->descriptor); bcdDevice = le16_to_cpu(udesc->bcdDevice); version = bcdDevice & 255; /* Set supported force feedback capabilities */ for (j = 0; hidpp_ff_effects[j] >= 0; j++) set_bit(hidpp_ff_effects[j], dev->ffbit); if (version > 1) for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++) set_bit(hidpp_ff_effects_v2[j], dev->ffbit); error = input_ff_create(dev, num_slots); if (error) { hid_err(dev, "Failed to create FF device!\n"); return error; } /* * Create a copy of passed data, so we can transfer memory * ownership to FF core */ data = kmemdup(data, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL); if (!data->effect_ids) { kfree(data); return -ENOMEM; } data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); if (!data->wq) { kfree(data->effect_ids); kfree(data); return -ENOMEM; } data->hidpp = hidpp; data->version = version; for (j = 0; j < num_slots; j++) data->effect_ids[j] = -1; ff = dev->ff; ff->private = data; ff->upload = hidpp_ff_upload_effect; ff->erase = hidpp_ff_erase_effect; ff->playback = hidpp_ff_playback; ff->set_gain = hidpp_ff_set_gain; ff->set_autocenter = hidpp_ff_set_autocenter; ff->destroy = hidpp_ff_destroy; /* Create sysfs interface */ error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range); if (error) hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error); /* init the hardware command queue */ atomic_set(&data->workqueue_size, 0); hid_info(hid, "Force feedback support loaded (firmware release %d).\n", version); return 0; } /* ************************************************************************** */ /* */ /* Device Support */ /* */ /* ************************************************************************** */ /* -------------------------------------------------------------------------- */ /* Touchpad HID++ devices */ /* -------------------------------------------------------------------------- */ #define WTP_MANUAL_RESOLUTION 39 struct wtp_data { u16 x_size, y_size; u8 finger_count; u8 mt_feature_index; u8 button_feature_index; u8 maxcontacts; bool flip_y; unsigned int resolution; }; static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned 
long **bit, int *max) { return -1; } static void wtp_populate_input(struct hidpp_device *hidpp, struct input_dev *input_dev) { struct wtp_data *wd = hidpp->private_data; __set_bit(EV_ABS, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); __clear_bit(EV_REL, input_dev->evbit); __clear_bit(EV_LED, input_dev->evbit); input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, wd->x_size, 0, 0); input_abs_set_res(input_dev, ABS_MT_POSITION_X, wd->resolution); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, wd->y_size, 0, 0); input_abs_set_res(input_dev, ABS_MT_POSITION_Y, wd->resolution); /* Max pressure is not given by the devices, pick one */ input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 50, 0, 0); input_set_capability(input_dev, EV_KEY, BTN_LEFT); if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) input_set_capability(input_dev, EV_KEY, BTN_RIGHT); else __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); input_mt_init_slots(input_dev, wd->maxcontacts, INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED); } static void wtp_touch_event(struct hidpp_device *hidpp, struct hidpp_touchpad_raw_xy_finger *touch_report) { struct wtp_data *wd = hidpp->private_data; int slot; if (!touch_report->finger_id || touch_report->contact_type) /* no actual data */ return; slot = input_mt_get_slot_by_key(hidpp->input, touch_report->finger_id); input_mt_slot(hidpp->input, slot); input_mt_report_slot_state(hidpp->input, MT_TOOL_FINGER, touch_report->contact_status); if (touch_report->contact_status) { input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_X, touch_report->x); input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_Y, wd->flip_y ? wd->y_size - touch_report->y : touch_report->y); input_event(hidpp->input, EV_ABS, ABS_MT_PRESSURE, touch_report->area); } } static void wtp_send_raw_xy_event(struct hidpp_device *hidpp, struct hidpp_touchpad_raw_xy *raw) { int i; for (i = 0; i < 2; i++) wtp_touch_event(hidpp, &(raw->fingers[i])); if (raw->end_of_frame && !(hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS)) input_event(hidpp->input, EV_KEY, BTN_LEFT, raw->button); if (raw->end_of_frame || raw->finger_count <= 2) { input_mt_sync_frame(hidpp->input); input_sync(hidpp->input); } } static int wtp_mouse_raw_xy_event(struct hidpp_device *hidpp, u8 *data) { struct wtp_data *wd = hidpp->private_data; u8 c1_area = ((data[7] & 0xf) * (data[7] & 0xf) + (data[7] >> 4) * (data[7] >> 4)) / 2; u8 c2_area = ((data[13] & 0xf) * (data[13] & 0xf) + (data[13] >> 4) * (data[13] >> 4)) / 2; struct hidpp_touchpad_raw_xy raw = { .timestamp = data[1], .fingers = { { .contact_type = 0, .contact_status = !!data[7], .x = get_unaligned_le16(&data[3]), .y = get_unaligned_le16(&data[5]), .z = c1_area, .area = c1_area, .finger_id = data[2], }, { .contact_type = 0, .contact_status = !!data[13], .x = get_unaligned_le16(&data[9]), .y = get_unaligned_le16(&data[11]), .z = c2_area, .area = c2_area, .finger_id = data[8], } }, .finger_count = wd->maxcontacts, .spurious_flag = 0, .end_of_frame = (data[0] >> 7) == 0, .button = data[0] & 0x01, }; wtp_send_raw_xy_event(hidpp, &raw); return 1; } static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct wtp_data *wd = hidpp->private_data; struct hidpp_report *report = (struct hidpp_report *)data; struct hidpp_touchpad_raw_xy raw; if (!wd || !hidpp->input) return 1; switch (data[0]) { case 0x02: if (size < 2) { hid_err(hdev, "Received HID report of bad size (%d)", size); return 1; } if (hidpp->quirks & 
HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) { input_event(hidpp->input, EV_KEY, BTN_LEFT, !!(data[1] & 0x01)); input_event(hidpp->input, EV_KEY, BTN_RIGHT, !!(data[1] & 0x02)); input_sync(hidpp->input); return 0; } else { if (size < 21) return 1; return wtp_mouse_raw_xy_event(hidpp, &data[7]); } case REPORT_ID_HIDPP_LONG: /* size is already checked in hidpp_raw_event. */ if ((report->fap.feature_index != wd->mt_feature_index) || (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY)) return 1; hidpp_touchpad_raw_xy_event(hidpp, data + 4, &raw); wtp_send_raw_xy_event(hidpp, &raw); return 0; } return 0; } static int wtp_get_config(struct hidpp_device *hidpp) { struct wtp_data *wd = hidpp->private_data; struct hidpp_touchpad_raw_info raw_info = {0}; u8 feature_type; int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_TOUCHPAD_RAW_XY, &wd->mt_feature_index, &feature_type); if (ret) /* means that the device is not powered up */ return ret; ret = hidpp_touchpad_get_raw_info(hidpp, wd->mt_feature_index, &raw_info); if (ret) return ret; wd->x_size = raw_info.x_size; wd->y_size = raw_info.y_size; wd->maxcontacts = raw_info.maxcontacts; wd->flip_y = raw_info.origin == TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT; wd->resolution = raw_info.res; if (!wd->resolution) wd->resolution = WTP_MANUAL_RESOLUTION; return 0; } static int wtp_allocate(struct hid_device *hdev, const struct hid_device_id *id) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct wtp_data *wd; wd = devm_kzalloc(&hdev->dev, sizeof(struct wtp_data), GFP_KERNEL); if (!wd) return -ENOMEM; hidpp->private_data = wd; return 0; }; static int wtp_connect(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct wtp_data *wd = hidpp->private_data; int ret; if (!wd->x_size) { ret = wtp_get_config(hidpp); if (ret) { hid_err(hdev, "Can not get wtp config: %d\n", ret); return ret; } } return hidpp_touchpad_set_raw_report_state(hidpp, wd->mt_feature_index, true, true); } /* ------------------------------------------------------------------------- */ /* Logitech M560 devices */ /* ------------------------------------------------------------------------- */ /* * Logitech M560 protocol overview * * The Logitech M560 mouse, is designed for windows 8. When the middle and/or * the sides buttons are pressed, it sends some keyboard keys events * instead of buttons ones. * To complicate things further, the middle button keys sequence * is different from the odd press and the even press. * * forward button -> Super_R * backward button -> Super_L+'d' (press only) * middle button -> 1st time: Alt_L+SuperL+XF86TouchpadOff (press only) * 2nd time: left-click (press only) * NB: press-only means that when the button is pressed, the * KeyPress/ButtonPress and KeyRelease/ButtonRelease events are generated * together sequentially; instead when the button is released, no event is * generated ! * * With the command * 10<xx>0a 3500af03 (where <xx> is the mouse id), * the mouse reacts differently: * - it never sends a keyboard key event * - for the three mouse button it sends: * middle button press 11<xx>0a 3500af00... * side 1 button (forward) press 11<xx>0a 3500b000... * side 2 button (backward) press 11<xx>0a 3500ae00... * middle/side1/side2 button release 11<xx>0a 35000000... 
*/ static const u8 m560_config_parameter[] = {0x00, 0xaf, 0x03}; /* how buttons are mapped in the report */ #define M560_MOUSE_BTN_LEFT 0x01 #define M560_MOUSE_BTN_RIGHT 0x02 #define M560_MOUSE_BTN_WHEEL_LEFT 0x08 #define M560_MOUSE_BTN_WHEEL_RIGHT 0x10 #define M560_SUB_ID 0x0a #define M560_BUTTON_MODE_REGISTER 0x35 static int m560_send_config_command(struct hid_device *hdev) { struct hidpp_report response; struct hidpp_device *hidpp_dev; hidpp_dev = hid_get_drvdata(hdev); return hidpp_send_rap_command_sync( hidpp_dev, REPORT_ID_HIDPP_SHORT, M560_SUB_ID, M560_BUTTON_MODE_REGISTER, (u8 *)m560_config_parameter, sizeof(m560_config_parameter), &response ); } static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); /* sanity check */ if (!hidpp->input) { hid_err(hdev, "error in parameter\n"); return -EINVAL; } if (size < 7) { hid_err(hdev, "error in report\n"); return 0; } if (data[0] == REPORT_ID_HIDPP_LONG && data[2] == M560_SUB_ID && data[6] == 0x00) { /* * m560 mouse report for middle, forward and backward button * * data[0] = 0x11 * data[1] = device-id * data[2] = 0x0a * data[5] = 0xaf -> middle * 0xb0 -> forward * 0xae -> backward * 0x00 -> release all * data[6] = 0x00 */ switch (data[5]) { case 0xaf: input_report_key(hidpp->input, BTN_MIDDLE, 1); break; case 0xb0: input_report_key(hidpp->input, BTN_FORWARD, 1); break; case 0xae: input_report_key(hidpp->input, BTN_BACK, 1); break; case 0x00: input_report_key(hidpp->input, BTN_BACK, 0); input_report_key(hidpp->input, BTN_FORWARD, 0); input_report_key(hidpp->input, BTN_MIDDLE, 0); break; default: hid_err(hdev, "error in report\n"); return 0; } input_sync(hidpp->input); } else if (data[0] == 0x02) { /* * Logitech M560 mouse report * * data[0] = type (0x02) * data[1..2] = buttons * data[3..5] = xy * data[6] = wheel */ int v; input_report_key(hidpp->input, BTN_LEFT, !!(data[1] & M560_MOUSE_BTN_LEFT)); input_report_key(hidpp->input, BTN_RIGHT, !!(data[1] & M560_MOUSE_BTN_RIGHT)); if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) { input_report_rel(hidpp->input, REL_HWHEEL, -1); input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, -120); } else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) { input_report_rel(hidpp->input, REL_HWHEEL, 1); input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, 120); } v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12); input_report_rel(hidpp->input, REL_X, v); v = hid_snto32(hid_field_extract(hdev, data+3, 12, 12), 12); input_report_rel(hidpp->input, REL_Y, v); v = hid_snto32(data[6], 8); if (v != 0) hidpp_scroll_counter_handle_scroll(hidpp->input, &hidpp->vertical_wheel_counter, v); input_sync(hidpp->input); } return 1; } static void m560_populate_input(struct hidpp_device *hidpp, struct input_dev *input_dev) { __set_bit(EV_KEY, input_dev->evbit); __set_bit(BTN_MIDDLE, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_BACK, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); __set_bit(EV_REL, input_dev->evbit); __set_bit(REL_X, input_dev->relbit); __set_bit(REL_Y, input_dev->relbit); __set_bit(REL_WHEEL, input_dev->relbit); __set_bit(REL_HWHEEL, input_dev->relbit); __set_bit(REL_WHEEL_HI_RES, input_dev->relbit); __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit); } static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { return -1; } /* 
------------------------------------------------------------------------- */ /* Logitech K400 devices */ /* ------------------------------------------------------------------------- */ /* * The Logitech K400 keyboard has an embedded touchpad which is seen * as a mouse from the OS point of view. There is a hardware shortcut to disable * tap-to-click but the setting is not remembered accross reset, annoying some * users. * * We can toggle this feature from the host by using the feature 0x6010: * Touchpad FW items */ struct k400_private_data { u8 feature_index; }; static int k400_disable_tap_to_click(struct hidpp_device *hidpp) { struct k400_private_data *k400 = hidpp->private_data; struct hidpp_touchpad_fw_items items = {}; int ret; u8 feature_type; if (!k400->feature_index) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_TOUCHPAD_FW_ITEMS, &k400->feature_index, &feature_type); if (ret) /* means that the device is not powered up */ return ret; } ret = hidpp_touchpad_fw_items_set(hidpp, k400->feature_index, &items); if (ret) return ret; return 0; } static int k400_allocate(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct k400_private_data *k400; k400 = devm_kzalloc(&hdev->dev, sizeof(struct k400_private_data), GFP_KERNEL); if (!k400) return -ENOMEM; hidpp->private_data = k400; return 0; }; static int k400_connect(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!disable_tap_to_click) return 0; return k400_disable_tap_to_click(hidpp); } /* ------------------------------------------------------------------------- */ /* Logitech G920 Driving Force Racing Wheel for Xbox One */ /* ------------------------------------------------------------------------- */ #define HIDPP_PAGE_G920_FORCE_FEEDBACK 0x8123 static int g920_ff_set_autocenter(struct hidpp_device *hidpp, struct hidpp_ff_private_data *data) { struct hidpp_report response; u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = { [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART, }; int ret; /* initialize with zero autocenter to get wheel in usable state */ dbg_hid("Setting autocenter to 0.\n"); ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_DOWNLOAD_EFFECT, params, ARRAY_SIZE(params), &response); if (ret) hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n"); else data->slot_autocenter = response.fap.params[0]; return ret; } static int g920_get_config(struct hidpp_device *hidpp, struct hidpp_ff_private_data *data) { struct hidpp_report response; u8 feature_type; int ret; memset(data, 0, sizeof(*data)); /* Find feature and store for later use */ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK, &data->feature_index, &feature_type); if (ret) return ret; /* Read number of slots available in device */ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_GET_INFO, NULL, 0, &response); if (ret) { if (ret < 0) return ret; hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS; /* reset all forces */ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_RESET_ALL, NULL, 0, &response); if (ret) hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n"); ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_GET_APERTURE, NULL, 0, &response); if (ret) { hid_warn(hidpp->hid_dev, "Failed to read range from device!\n"); } data->range = ret ? 
900 : get_unaligned_be16(&response.fap.params[0]); /* Read the current gain values */ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response); if (ret) hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n"); data->gain = ret ? 0xffff : get_unaligned_be16(&response.fap.params[0]); /* ignore boost value at response.fap.params[2] */ return g920_ff_set_autocenter(hidpp, data); } /* -------------------------------------------------------------------------- */ /* Logitech Dinovo Mini keyboard with builtin touchpad */ /* -------------------------------------------------------------------------- */ #define DINOVO_MINI_PRODUCT_ID 0xb30c static int lg_dinovo_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) return 0; switch (usage->hid & HID_USAGE) { case 0x00d: lg_map_key_clear(KEY_MEDIA); break; default: return 0; } return 1; } /* -------------------------------------------------------------------------- */ /* HID++1.0 devices which use HID++ reports for their wheels */ /* -------------------------------------------------------------------------- */ static int hidpp10_wheel_connect(struct hidpp_device *hidpp) { return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT, HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT); } static int hidpp10_wheel_raw_event(struct hidpp_device *hidpp, u8 *data, int size) { s8 value, hvalue; if (!hidpp->input) return -EINVAL; if (size < 7) return 0; if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_ROLLER) return 0; value = data[3]; hvalue = data[4]; input_report_rel(hidpp->input, REL_WHEEL, value); input_report_rel(hidpp->input, REL_WHEEL_HI_RES, value * 120); input_report_rel(hidpp->input, REL_HWHEEL, hvalue); input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, hvalue * 120); input_sync(hidpp->input); return 1; } static void hidpp10_wheel_populate_input(struct hidpp_device *hidpp, struct input_dev *input_dev) { __set_bit(EV_REL, input_dev->evbit); __set_bit(REL_WHEEL, input_dev->relbit); __set_bit(REL_WHEEL_HI_RES, input_dev->relbit); __set_bit(REL_HWHEEL, input_dev->relbit); __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit); } /* -------------------------------------------------------------------------- */ /* HID++1.0 mice which use HID++ reports for extra mouse buttons */ /* -------------------------------------------------------------------------- */ static int hidpp10_extra_mouse_buttons_connect(struct hidpp_device *hidpp) { return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT, HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT); } static int hidpp10_extra_mouse_buttons_raw_event(struct hidpp_device *hidpp, u8 *data, int size) { int i; if (!hidpp->input) return -EINVAL; if (size < 7) return 0; if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_MOUSE_EXTRA_BTNS) return 0; /* * Buttons are either delivered through the regular mouse report *or* * through the extra buttons report. At least for button 6 how it is * delivered differs per receiver firmware version. Even receivers with * the same usb-id show different behavior, so we handle both cases. 
*/ for (i = 0; i < 8; i++) input_report_key(hidpp->input, BTN_MOUSE + i, (data[3] & (1 << i))); /* Some mice report events on button 9+, use BTN_MISC */ for (i = 0; i < 8; i++) input_report_key(hidpp->input, BTN_MISC + i, (data[4] & (1 << i))); input_sync(hidpp->input); return 1; } static void hidpp10_extra_mouse_buttons_populate_input( struct hidpp_device *hidpp, struct input_dev *input_dev) { /* BTN_MOUSE - BTN_MOUSE+7 are set already by the descriptor */ __set_bit(BTN_0, input_dev->keybit); __set_bit(BTN_1, input_dev->keybit); __set_bit(BTN_2, input_dev->keybit); __set_bit(BTN_3, input_dev->keybit); __set_bit(BTN_4, input_dev->keybit); __set_bit(BTN_5, input_dev->keybit); __set_bit(BTN_6, input_dev->keybit); __set_bit(BTN_7, input_dev->keybit); } /* -------------------------------------------------------------------------- */ /* HID++1.0 kbds which only report 0x10xx consumer usages through sub-id 0x03 */ /* -------------------------------------------------------------------------- */ /* Find the consumer-page input report desc and change Maximums to 0x107f */ static u8 *hidpp10_consumer_keys_report_fixup(struct hidpp_device *hidpp, u8 *_rdesc, unsigned int *rsize) { /* Note 0 terminated so we can use strnstr to search for this. */ static const char consumer_rdesc_start[] = { 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */ 0x09, 0x01, /* USAGE (Consumer Control) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x03, /* REPORT_ID = 3 */ 0x75, 0x10, /* REPORT_SIZE (16) */ 0x95, 0x02, /* REPORT_COUNT (2) */ 0x15, 0x01, /* LOGICAL_MIN (1) */ 0x26, 0x00 /* LOGICAL_MAX (... */ }; char *consumer_rdesc, *rdesc = (char *)_rdesc; unsigned int size; consumer_rdesc = strnstr(rdesc, consumer_rdesc_start, *rsize); size = *rsize - (consumer_rdesc - rdesc); if (consumer_rdesc && size >= 25) { consumer_rdesc[15] = 0x7f; consumer_rdesc[16] = 0x10; consumer_rdesc[20] = 0x7f; consumer_rdesc[21] = 0x10; } return _rdesc; } static int hidpp10_consumer_keys_connect(struct hidpp_device *hidpp) { return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_CONSUMER_REPORT, HIDPP_ENABLE_CONSUMER_REPORT); } static int hidpp10_consumer_keys_raw_event(struct hidpp_device *hidpp, u8 *data, int size) { u8 consumer_report[5]; if (size < 7) return 0; if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS) return 0; /* * Build a normal consumer report (3) out of the data, this detour * is necessary to get some keyboards to report their 0x10xx usages. 
*/ consumer_report[0] = 0x03; memcpy(&consumer_report[1], &data[3], 4); /* We are called from atomic context */ hid_report_raw_event(hidpp->hid_dev, HID_INPUT_REPORT, consumer_report, 5, 1); return 1; } /* -------------------------------------------------------------------------- */ /* High-resolution scroll wheels */ /* -------------------------------------------------------------------------- */ static int hi_res_scroll_enable(struct hidpp_device *hidpp) { int ret; u8 multiplier = 1; if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL) { ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false); if (ret == 0) ret = hidpp_hrw_get_wheel_capability(hidpp, &multiplier); } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL) { ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true, &multiplier); } else /* if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL) */ { ret = hidpp10_enable_scrolling_acceleration(hidpp); multiplier = 8; } if (ret) { hid_dbg(hidpp->hid_dev, "Could not enable hi-res scrolling: %d\n", ret); return ret; } if (multiplier == 0) { hid_dbg(hidpp->hid_dev, "Invalid multiplier 0 from device, setting it to 1\n"); multiplier = 1; } hidpp->vertical_wheel_counter.wheel_multiplier = multiplier; hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier); return 0; } static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp) { int ret; unsigned long capabilities; capabilities = hidpp->capabilities; if (hidpp->protocol_major >= 2) { u8 feature_index; u8 feature_type; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL, &feature_index, &feature_type); if (!ret) { hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL; hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scroll wheel\n"); return 0; } ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HI_RESOLUTION_SCROLLING, &feature_index, &feature_type); if (!ret) { hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL; hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n"); } } else { /* We cannot detect fast scrolling support on HID++ 1.0 devices */ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) { hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL; hid_dbg(hidpp->hid_dev, "Detected HID++ 1.0 fast scroll\n"); } } if (hidpp->capabilities == capabilities) hid_dbg(hidpp->hid_dev, "Did not detect HID++ hi-res scrolling hardware support\n"); return 0; } /* -------------------------------------------------------------------------- */ /* Generic HID++ devices */ /* -------------------------------------------------------------------------- */ static u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *rsize) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!hidpp) return rdesc; /* For 27 MHz keyboards the quirk gets set after hid_parse. 
*/ if (hdev->group == HID_GROUP_LOGITECH_27MHZ_DEVICE || (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS)) rdesc = hidpp10_consumer_keys_report_fixup(hidpp, rdesc, rsize); return rdesc; } static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!hidpp) return 0; if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) return wtp_input_mapping(hdev, hi, field, usage, bit, max); else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560 && field->application != HID_GD_MOUSE) return m560_input_mapping(hdev, hi, field, usage, bit, max); if (hdev->product == DINOVO_MINI_PRODUCT_ID) return lg_dinovo_input_mapping(hdev, hi, field, usage, bit, max); return 0; } static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!hidpp) return 0; /* Ensure that Logitech G920 is not given a default fuzz/flat value */ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { if (usage->type == EV_ABS && (usage->code == ABS_X || usage->code == ABS_Y || usage->code == ABS_Z || usage->code == ABS_RZ)) { field->application = HID_GD_MULTIAXIS; } } return 0; } static void hidpp_populate_input(struct hidpp_device *hidpp, struct input_dev *input) { hidpp->input = input; if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) wtp_populate_input(hidpp, input); else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) m560_populate_input(hidpp, input); if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) hidpp10_wheel_populate_input(hidpp, input); if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) hidpp10_extra_mouse_buttons_populate_input(hidpp, input); } static int hidpp_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct input_dev *input = hidinput->input; if (!hidpp) return 0; hidpp_populate_input(hidpp, input); return 0; } static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *question = hidpp->send_receive_buf; struct hidpp_report *answer = hidpp->send_receive_buf; struct hidpp_report *report = (struct hidpp_report *)data; int ret; /* * If the mutex is locked then we have a pending answer from a * previously sent command. 
*/ if (unlikely(mutex_is_locked(&hidpp->send_mutex))) { /* * Check for a correct hidpp20 answer or the corresponding * error */ if (hidpp_match_answer(question, report) || hidpp_match_error(question, report)) { *answer = *report; hidpp->answer_available = true; wake_up(&hidpp->wait); /* * This was an answer to a command that this driver sent * We return 1 to hid-core to avoid forwarding the * command upstream as it has been treated by the driver */ return 1; } } if (unlikely(hidpp_report_is_connect_event(hidpp, report))) { if (schedule_work(&hidpp->work) == 0) dbg_hid("%s: connect event already queued\n", __func__); return 1; } if (hidpp->hid_dev->group == HID_GROUP_LOGITECH_27MHZ_DEVICE && data[0] == REPORT_ID_HIDPP_SHORT && data[2] == HIDPP_SUB_ID_USER_IFACE_EVENT && (data[3] & HIDPP_USER_IFACE_EVENT_ENCRYPTION_KEY_LOST)) { dev_err_ratelimited(&hidpp->hid_dev->dev, "Error the keyboard's wireless encryption key has been lost, your keyboard will not work unless you re-configure encryption.\n"); dev_err_ratelimited(&hidpp->hid_dev->dev, "See: https://gitlab.freedesktop.org/jwrdegoede/logitech-27mhz-keyboard-encryption-setup/\n"); } if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { ret = hidpp20_battery_event_1000(hidpp, data, size); if (ret != 0) return ret; ret = hidpp20_battery_event_1004(hidpp, data, size); if (ret != 0) return ret; ret = hidpp_solar_battery_event(hidpp, data, size); if (ret != 0) return ret; ret = hidpp20_battery_voltage_event(hidpp, data, size); if (ret != 0) return ret; ret = hidpp20_adc_measurement_event_1f20(hidpp, data, size); if (ret != 0) return ret; } if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) { ret = hidpp10_battery_event(hidpp, data, size); if (ret != 0) return ret; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) { ret = hidpp10_wheel_raw_event(hidpp, data, size); if (ret != 0) return ret; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) { ret = hidpp10_extra_mouse_buttons_raw_event(hidpp, data, size); if (ret != 0) return ret; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) { ret = hidpp10_consumer_keys_raw_event(hidpp, data, size); if (ret != 0) return ret; } return 0; } static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); int ret = 0; if (!hidpp) return 0; /* Generic HID++ processing. */ switch (data[0]) { case REPORT_ID_HIDPP_VERY_LONG: if (size != hidpp->very_long_report_length) { hid_err(hdev, "received hid++ report of bad size (%d)", size); return 1; } ret = hidpp_raw_hidpp_event(hidpp, data, size); break; case REPORT_ID_HIDPP_LONG: if (size != HIDPP_REPORT_LONG_LENGTH) { hid_err(hdev, "received hid++ report of bad size (%d)", size); return 1; } ret = hidpp_raw_hidpp_event(hidpp, data, size); break; case REPORT_ID_HIDPP_SHORT: if (size != HIDPP_REPORT_SHORT_LENGTH) { hid_err(hdev, "received hid++ report of bad size (%d)", size); return 1; } ret = hidpp_raw_hidpp_event(hidpp, data, size); break; } /* If no report is available for further processing, skip calling * raw_event of subclasses. 
*/ if (ret != 0) return ret; if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) return wtp_raw_event(hdev, data, size); else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) return m560_raw_event(hdev, data, size); return 0; } static int hidpp_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { /* This function will only be called for scroll events, due to the * restriction imposed in hidpp_usages. */ struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct hidpp_scroll_counter *counter; if (!hidpp) return 0; counter = &hidpp->vertical_wheel_counter; /* A scroll event may occur before the multiplier has been retrieved or * the input device set, or high-res scroll enabling may fail. In such * cases we must return early (falling back to default behaviour) to * avoid a crash in hidpp_scroll_counter_handle_scroll. */ if (!(hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL) || value == 0 || hidpp->input == NULL || counter->wheel_multiplier == 0) return 0; hidpp_scroll_counter_handle_scroll(hidpp->input, counter, value); return 1; } static int hidpp_initialize_battery(struct hidpp_device *hidpp) { static atomic_t battery_no = ATOMIC_INIT(0); struct power_supply_config cfg = { .drv_data = hidpp }; struct power_supply_desc *desc = &hidpp->battery.desc; enum power_supply_property *battery_props; struct hidpp_battery *battery; unsigned int num_battery_props; unsigned long n; int ret; if (hidpp->battery.ps) return 0; hidpp->battery.feature_index = 0xff; hidpp->battery.solar_feature_index = 0xff; hidpp->battery.voltage_feature_index = 0xff; hidpp->battery.adc_measurement_feature_index = 0xff; if (hidpp->protocol_major >= 2) { if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750) ret = hidpp_solar_request_battery_event(hidpp); else { /* we only support one battery feature right now, so let's first check the ones that support battery level first and leave voltage for last */ ret = hidpp20_query_battery_info_1000(hidpp); if (ret) ret = hidpp20_query_battery_info_1004(hidpp); if (ret) ret = hidpp20_query_battery_voltage_info(hidpp); if (ret) ret = hidpp20_query_adc_measurement_info_1f20(hidpp); } if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_BATTERY; } else { ret = hidpp10_query_battery_status(hidpp); if (ret) { ret = hidpp10_query_battery_mileage(hidpp); if (ret) return -ENOENT; hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_MILEAGE; } else { hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS; } hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_BATTERY; } battery_props = devm_kmemdup(&hidpp->hid_dev->dev, hidpp_battery_props, sizeof(hidpp_battery_props), GFP_KERNEL); if (!battery_props) return -ENOMEM; num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE || hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE || hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE || hidpp->capabilities & HIDPP_CAPABILITY_ADC_MEASUREMENT) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_CAPACITY; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_CAPACITY_LEVEL; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE || hidpp->capabilities & HIDPP_CAPABILITY_ADC_MEASUREMENT) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_VOLTAGE_NOW; battery = &hidpp->battery; n = atomic_inc_return(&battery_no) - 1; desc->properties = battery_props; desc->num_properties = num_battery_props; desc->get_property = 
hidpp_battery_get_property; sprintf(battery->name, "hidpp_battery_%ld", n); desc->name = battery->name; desc->type = POWER_SUPPLY_TYPE_BATTERY; desc->use_for_apm = 0; battery->ps = devm_power_supply_register(&hidpp->hid_dev->dev, &battery->desc, &cfg); if (IS_ERR(battery->ps)) return PTR_ERR(battery->ps); power_supply_powers(battery->ps, &hidpp->hid_dev->dev); return ret; } /* Get name + serial for USB and Bluetooth HID++ devices */ static void hidpp_non_unifying_init(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; char *name; /* Bluetooth devices already have their serialnr set */ if (hid_is_usb(hdev)) hidpp_serial_init(hidpp); name = hidpp_get_device_name(hidpp); if (name) { dbg_hid("HID++: Got name: %s\n", name); snprintf(hdev->name, sizeof(hdev->name), "%s", name); kfree(name); } } static int hidpp_input_open(struct input_dev *dev) { struct hid_device *hid = input_get_drvdata(dev); return hid_hw_open(hid); } static void hidpp_input_close(struct input_dev *dev) { struct hid_device *hid = input_get_drvdata(dev); hid_hw_close(hid); } static struct input_dev *hidpp_allocate_input(struct hid_device *hdev) { struct input_dev *input_dev = devm_input_allocate_device(&hdev->dev); struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!input_dev) return NULL; input_set_drvdata(input_dev, hdev); input_dev->open = hidpp_input_open; input_dev->close = hidpp_input_close; input_dev->name = hidpp->name; input_dev->phys = hdev->phys; input_dev->uniq = hdev->uniq; input_dev->id.bustype = hdev->bus; input_dev->id.vendor = hdev->vendor; input_dev->id.product = hdev->product; input_dev->id.version = hdev->version; input_dev->dev.parent = &hdev->dev; return input_dev; } static void hidpp_connect_event(struct work_struct *work) { struct hidpp_device *hidpp = container_of(work, struct hidpp_device, work); struct hid_device *hdev = hidpp->hid_dev; struct input_dev *input; char *name, *devm_name; int ret; /* Get device version to check if it is connected */ ret = hidpp_root_get_protocol_version(hidpp); if (ret) { hid_dbg(hidpp->hid_dev, "Disconnected\n"); if (hidpp->battery.ps) { hidpp->battery.online = false; hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN; hidpp->battery.level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; power_supply_changed(hidpp->battery.ps); } return; } if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) { ret = wtp_connect(hdev); if (ret) return; } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) { ret = m560_send_config_command(hdev); if (ret) return; } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_K400) { ret = k400_connect(hdev); if (ret) return; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) { ret = hidpp10_wheel_connect(hidpp); if (ret) return; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) { ret = hidpp10_extra_mouse_buttons_connect(hidpp); if (ret) return; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) { ret = hidpp10_consumer_keys_connect(hidpp); if (ret) return; } if (hidpp->protocol_major >= 2) { u8 feature_index; if (!hidpp_get_wireless_feature_index(hidpp, &feature_index)) hidpp->wireless_feature_index = feature_index; } if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) { name = hidpp_get_device_name(hidpp); if (name) { devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name); kfree(name); if (!devm_name) return; hidpp->name = devm_name; } } hidpp_initialize_battery(hidpp); if (!hid_is_usb(hidpp->hid_dev)) hidpp_initialize_hires_scroll(hidpp); /* forward current battery state */ if (hidpp->capabilities & 
HIDPP_CAPABILITY_HIDPP10_BATTERY) { hidpp10_enable_battery_reporting(hidpp); if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE) hidpp10_query_battery_mileage(hidpp); else hidpp10_query_battery_status(hidpp); } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) hidpp20_query_battery_voltage_info(hidpp); else if (hidpp->capabilities & HIDPP_CAPABILITY_UNIFIED_BATTERY) hidpp20_query_battery_info_1004(hidpp); else if (hidpp->capabilities & HIDPP_CAPABILITY_ADC_MEASUREMENT) hidpp20_query_adc_measurement_info_1f20(hidpp); else hidpp20_query_battery_info_1000(hidpp); } if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL) hi_res_scroll_enable(hidpp); if (!(hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) || hidpp->delayed_input) /* if the input nodes are already created, we can stop now */ return; input = hidpp_allocate_input(hdev); if (!input) { hid_err(hdev, "cannot allocate new input device: %d\n", ret); return; } hidpp_populate_input(hidpp, input); ret = input_register_device(input); if (ret) { input_free_device(input); return; } hidpp->delayed_input = input; } static DEVICE_ATTR(builtin_power_supply, 0000, NULL, NULL); static struct attribute *sysfs_attrs[] = { &dev_attr_builtin_power_supply.attr, NULL }; static const struct attribute_group ps_attribute_group = { .attrs = sysfs_attrs }; static int hidpp_get_report_length(struct hid_device *hdev, int id) { struct hid_report_enum *re; struct hid_report *report; re = &(hdev->report_enum[HID_OUTPUT_REPORT]); report = re->report_id_hash[id]; if (!report) return 0; return report->field[0]->report_count + 1; } static u8 hidpp_validate_device(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); int id, report_length; u8 supported_reports = 0; id = REPORT_ID_HIDPP_SHORT; report_length = hidpp_get_report_length(hdev, id); if (report_length) { if (report_length < HIDPP_REPORT_SHORT_LENGTH) goto bad_device; supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED; } id = REPORT_ID_HIDPP_LONG; report_length = hidpp_get_report_length(hdev, id); if (report_length) { if (report_length < HIDPP_REPORT_LONG_LENGTH) goto bad_device; supported_reports |= HIDPP_REPORT_LONG_SUPPORTED; } id = REPORT_ID_HIDPP_VERY_LONG; report_length = hidpp_get_report_length(hdev, id); if (report_length) { if (report_length < HIDPP_REPORT_LONG_LENGTH || report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) goto bad_device; supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED; hidpp->very_long_report_length = report_length; } return supported_reports; bad_device: hid_warn(hdev, "not enough values in hidpp report %d\n", id); return false; } static bool hidpp_application_equals(struct hid_device *hdev, unsigned int application) { struct list_head *report_list; struct hid_report *report; report_list = &hdev->report_enum[HID_INPUT_REPORT].report_list; report = list_first_entry_or_null(report_list, struct hid_report, list); return report && report->application == application; } static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct hidpp_device *hidpp; int ret; unsigned int connect_mask = HID_CONNECT_DEFAULT; /* report_fixup needs drvdata to be set before we call hid_parse */ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL); if (!hidpp) return -ENOMEM; hidpp->hid_dev = hdev; hidpp->name = hdev->name; hidpp->quirks = id->driver_data; hid_set_drvdata(hdev, hidpp); ret = 
hid_parse(hdev); if (ret) { hid_err(hdev, "%s:parse failed\n", __func__); return ret; } /* * Make sure the device is HID++ capable, otherwise treat as generic HID */ hidpp->supported_reports = hidpp_validate_device(hdev); if (!hidpp->supported_reports) { hid_set_drvdata(hdev, NULL); devm_kfree(&hdev->dev, hidpp); return hid_hw_start(hdev, HID_CONNECT_DEFAULT); } if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE && hidpp_application_equals(hdev, HID_GD_MOUSE)) hidpp->quirks |= HIDPP_QUIRK_HIDPP_WHEELS | HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS; if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE && hidpp_application_equals(hdev, HID_GD_KEYBOARD)) hidpp->quirks |= HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS; if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) { ret = wtp_allocate(hdev, id); if (ret) return ret; } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_K400) { ret = k400_allocate(hdev); if (ret) return ret; } INIT_WORK(&hidpp->work, hidpp_connect_event); mutex_init(&hidpp->send_mutex); init_waitqueue_head(&hidpp->wait); /* indicates we are handling the battery properties in the kernel */ ret = sysfs_create_group(&hdev->dev.kobj, &ps_attribute_group); if (ret) hid_warn(hdev, "Cannot allocate sysfs group for %s\n", hdev->name); /* * First call hid_hw_start(hdev, 0) to allow IO without connecting any * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's * name and serial number and store these in hdev->name and hdev->uniq, * before the hid-input and hidraw drivers expose these to userspace. */ ret = hid_hw_start(hdev, 0); if (ret) { hid_err(hdev, "hw start failed\n"); goto hid_hw_start_fail; } ret = hid_hw_open(hdev); if (ret < 0) { dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n", __func__, ret); goto hid_hw_open_fail; } /* Allow incoming packets */ hid_device_io_start(hdev); /* Get name + serial, store in hdev->name + hdev->uniq */ if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE) hidpp_unifying_init(hidpp); else hidpp_non_unifying_init(hidpp); if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) connect_mask &= ~HID_CONNECT_HIDINPUT; /* Now export the actual inputs and hidraw nodes to the world */ hid_device_io_stop(hdev); ret = hid_connect(hdev, connect_mask); if (ret) { hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret); goto hid_hw_init_fail; } /* Check for connected devices now that incoming packets will not be disabled again */ hid_device_io_start(hdev); schedule_work(&hidpp->work); flush_work(&hidpp->work); if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { struct hidpp_ff_private_data data; ret = g920_get_config(hidpp, &data); if (!ret) ret = hidpp_ff_init(hidpp, &data); if (ret) hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n", ret); } /* * This relies on logi_dj_ll_close() being a no-op so that DJ connection * events will still be received. 
*/ hid_hw_close(hdev); return ret; hid_hw_init_fail: hid_hw_close(hdev); hid_hw_open_fail: hid_hw_stop(hdev); hid_hw_start_fail: sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group); cancel_work_sync(&hidpp->work); mutex_destroy(&hidpp->send_mutex); return ret; } static void hidpp_remove(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!hidpp) return hid_hw_stop(hdev); sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group); hid_hw_stop(hdev); cancel_work_sync(&hidpp->work); mutex_destroy(&hidpp->send_mutex); } #define LDJ_DEVICE(product) \ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \ USB_VENDOR_ID_LOGITECH, (product)) #define L27MHZ_DEVICE(product) \ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_27MHZ_DEVICE, \ USB_VENDOR_ID_LOGITECH, (product)) static const struct hid_device_id hidpp_devices[] = { { /* wireless touchpad */ LDJ_DEVICE(0x4011), .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS }, { /* wireless touchpad T650 */ LDJ_DEVICE(0x4101), .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, { /* wireless touchpad T651 */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651), .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, { /* Mouse Logitech Anywhere MX */ LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Mouse logitech M560 */ LDJ_DEVICE(0x402d), .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 }, { /* Mouse Logitech M705 (firmware RQM17) */ LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Mouse Logitech Performance MX */ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Keyboard logitech K400 */ LDJ_DEVICE(0x4024), .driver_data = HIDPP_QUIRK_CLASS_K400 }, { /* Solar Keyboard Logitech K750 */ LDJ_DEVICE(0x4002), .driver_data = HIDPP_QUIRK_CLASS_K750 }, { /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb305), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Dinovo Edge (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb309), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { LDJ_DEVICE(HID_ANY_ID) }, { /* Keyboard LX501 (Y-RR53) */ L27MHZ_DEVICE(0x0049), .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL }, { /* Keyboard MX3000 (Y-RAM74) */ L27MHZ_DEVICE(0x0057), .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL }, { /* Keyboard MX3200 (Y-RAV80) */ L27MHZ_DEVICE(0x005c), .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL }, { /* S510 Media Remote */ L27MHZ_DEVICE(0x00fe), .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL }, { L27MHZ_DEVICE(HID_ANY_ID) }, { /* Logitech G403 Wireless Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, { /* Logitech G502 Lightspeed Wireless Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08D) }, { /* Logitech G703 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) }, { /* Logitech G703 Hero Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) }, { /* Logitech G900 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) }, { /* Logitech G903 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) }, { /* Logitech G Pro Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) }, { /* MX Vertical over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
0xC08A) }, { /* Logitech G703 Hero Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) }, { /* Logitech G903 Hero Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) }, { /* Logitech G915 TKL Keyboard over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC343) }, { /* Logitech G920 Wheel over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, { /* Logitech G923 Wheel (Xbox version) over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G923_XBOX_WHEEL), .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS }, { /* Logitech G Pro X Superlight Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) }, { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) }, { /* G935 Gaming Headset */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87), .driver_data = HIDPP_QUIRK_WIRELESS_STATUS }, { /* MX5000 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Dinovo Edge keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb309), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Logitech G915 TKL keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb35f) }, { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) }, { /* MX Master mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) }, { /* M720 Triathlon mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) }, { /* MX Master 2S mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb019) }, { /* MX Ergo trackball over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) }, { /* MX Vertical mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb020) }, { /* Signature M650 over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb02a) }, { /* MX Master 3 mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) }, { /* MX Anywhere 3 mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb025) }, { /* MX Master 3S mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) }, { /* MX Anywhere 3SB mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb038) }, {} }; MODULE_DEVICE_TABLE(hid, hidpp_devices); static const struct hid_usage_id hidpp_usages[] = { { HID_GD_WHEEL, EV_REL, REL_WHEEL_HI_RES }, { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} }; static struct hid_driver hidpp_driver = { .name = "logitech-hidpp-device", .id_table = hidpp_devices, .report_fixup = hidpp_report_fixup, .probe = hidpp_probe, .remove = hidpp_remove, .raw_event = hidpp_raw_event, .usage_table = hidpp_usages, .event = hidpp_event, .input_configured = hidpp_input_configured, .input_mapping = hidpp_input_mapping, .input_mapped = hidpp_input_mapped, }; module_hid_driver(hidpp_driver);
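The hidpp_devices[] table above pairs specific product IDs with quirk flags in .driver_data and closes each receiver family with a catch-all entry ({ LDJ_DEVICE(HID_ANY_ID) }, { L27MHZ_DEVICE(HID_ANY_ID) }); HID core walks the table in order and the first matching entry supplies the driver data, so the quirked entries must come before the wildcard. Below is a minimal standalone userspace sketch of that first-match-with-wildcard lookup and quirk-bit testing; the table contents, IDs, and quirk values are simplified stand-ins, not the driver's real definitions.

/* Standalone sketch of first-match device-table lookup with a trailing
 * wildcard entry, loosely modeled on hidpp_devices[]. IDs and quirk bits
 * are illustrative placeholders, not the real kernel definitions. */
#include <stdint.h>
#include <stdio.h>

#define ANY_ID           0xffffffffu
#define QUIRK_CLASS_WTP  (1u << 0)   /* placeholder quirk bits */
#define QUIRK_DELAYED    (1u << 1)

struct dev_id {
	uint32_t vendor;
	uint32_t product;
	uint32_t driver_data; /* quirk bitmask, like .driver_data above */
};

static const struct dev_id table[] = {
	{ 0x046d, 0x4011, QUIRK_CLASS_WTP | QUIRK_DELAYED }, /* specific entry */
	{ 0x046d, ANY_ID, 0 },                               /* catch-all last */
	{ 0 }                                                /* terminator */
};

/* First matching entry wins, so specific entries must precede the wildcard. */
static const struct dev_id *match(uint32_t vendor, uint32_t product)
{
	const struct dev_id *id;

	for (id = table; id->vendor; id++)
		if (id->vendor == vendor &&
		    (id->product == ANY_ID || id->product == product))
			return id;
	return NULL;
}

int main(void)
{
	const struct dev_id *id = match(0x046d, 0x4011);

	if (id && (id->driver_data & QUIRK_CLASS_WTP))
		printf("matched with WTP quirk (driver_data=0x%x)\n",
		       id->driver_data);
	return 0;
}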
// SPDX-License-Identifier: GPL-2.0-or-later /* * IPV6 GSO/GRO offload support * Linux INET6 implementation */ #include <linux/kernel.h> #include <linux/socket.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/printk.h> #include <net/protocol.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/tcp.h> #include <net/udp.h> #include <net/gro.h> #include <net/gso.h> #include "ip6_offload.h" /* All GRO functions are always builtin, except UDP over ipv6, which lays in * ipv6 module, as it depends on UDPv6 lookup function, so we need special care * when ipv6 is built as a module */ #if IS_BUILTIN(CONFIG_IPV6) #define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__) #else #define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__) #endif #define indirect_call_gro_receive_l4(f2, f1, cb, head, skb) \ ({ \ unlikely(gro_recursion_inc_test(skb)) ? 
\ NAPI_GRO_CB(skb)->flush |= 1, NULL : \ INDIRECT_CALL_L4(cb, f2, f1, head, skb); \ }) static int ipv6_gro_pull_exthdrs(struct sk_buff *skb, int off, int proto) { const struct net_offload *ops = NULL; struct ipv6_opt_hdr *opth; for (;;) { int len; ops = rcu_dereference(inet6_offloads[proto]); if (unlikely(!ops)) break; if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) break; opth = skb_gro_header(skb, off + sizeof(*opth), off); if (unlikely(!opth)) break; len = ipv6_optlen(opth); opth = skb_gro_header(skb, off + len, off); if (unlikely(!opth)) break; proto = opth->nexthdr; off += len; } skb_gro_pull(skb, off - skb_gro_receive_network_offset(skb)); return proto; } static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) { const struct net_offload *ops = NULL; for (;;) { struct ipv6_opt_hdr *opth; int len; ops = rcu_dereference(inet6_offloads[proto]); if (unlikely(!ops)) break; if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) break; if (unlikely(!pskb_may_pull(skb, 8))) break; opth = (void *)skb->data; len = ipv6_optlen(opth); if (unlikely(!pskb_may_pull(skb, len))) break; opth = (void *)skb->data; proto = opth->nexthdr; __skb_pull(skb, len); } return proto; } static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct ipv6hdr *ipv6h; const struct net_offload *ops; int proto, err; struct frag_hdr *fptr; unsigned int payload_len; u8 *prevhdr; int offset = 0; bool encap, udpfrag; int nhoff; bool gso_partial; skb_reset_network_header(skb); err = ipv6_hopopt_jumbo_remove(skb); if (err) return ERR_PTR(err); nhoff = skb_network_header(skb) - skb_mac_header(skb); if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; encap = SKB_GSO_CB(skb)->encap_level > 0; if (encap) features &= skb->dev->hw_enc_features; SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); segs = ERR_PTR(-EPROTONOSUPPORT); proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); if (skb->encapsulation && skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6)) udpfrag = proto == IPPROTO_UDP && encap && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP); else udpfrag = proto == IPPROTO_UDP && !skb->encapsulation && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP); ops = rcu_dereference(inet6_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); segs = ops->callbacks.gso_segment(skb, features); if (!segs) skb->network_header = skb_mac_header(skb) + nhoff - skb->head; } if (IS_ERR_OR_NULL(segs)) goto out; gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); for (skb = segs; skb; skb = skb->next) { ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); if (gso_partial && skb_is_gso(skb)) payload_len = skb_shinfo(skb)->gso_size + SKB_GSO_CB(skb)->data_offset + skb->head - (unsigned char *)(ipv6h + 1); else payload_len = skb->len - nhoff - sizeof(*ipv6h); ipv6h->payload_len = htons(payload_len); skb->network_header = (u8 *)ipv6h - skb->head; skb_reset_mac_len(skb); if (udpfrag) { int err = ip6_find_1stfragopt(skb, &prevhdr); if (err < 0) { kfree_skb_list(segs); return ERR_PTR(err); } fptr = (struct frag_hdr *)((u8 *)ipv6h + err); fptr->frag_off = htons(offset); if (skb->next) fptr->frag_off |= htons(IP6_MF); offset += (ntohs(ipv6h->payload_len) - sizeof(struct frag_hdr)); } if (encap) skb_reset_inner_headers(skb); } out: return segs; } /* Return the total length of all the extension hdrs, following the same * logic in ipv6_gso_pull_exthdrs() when parsing 
ext-hdrs. */ static int ipv6_exthdrs_len(struct ipv6hdr *iph, const struct net_offload **opps) { struct ipv6_opt_hdr *opth = (void *)iph; int len = 0, proto, optlen = sizeof(*iph); proto = iph->nexthdr; for (;;) { *opps = rcu_dereference(inet6_offloads[proto]); if (unlikely(!(*opps))) break; if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR)) break; opth = (void *)opth + optlen; optlen = ipv6_optlen(opth); len += optlen; proto = opth->nexthdr; } return len; } INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct net_offload *ops; struct sk_buff *pp = NULL; struct sk_buff *p; struct ipv6hdr *iph; unsigned int nlen; unsigned int hlen; unsigned int off; u16 flush = 1; int proto; off = skb_gro_offset(skb); hlen = off + sizeof(*iph); iph = skb_gro_header(skb, hlen, off); if (unlikely(!iph)) goto out; NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off; flush += ntohs(iph->payload_len) != skb->len - hlen; proto = iph->nexthdr; ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) { proto = ipv6_gro_pull_exthdrs(skb, hlen, proto); ops = rcu_dereference(inet6_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) goto out; iph = skb_gro_network_header(skb); } else { skb_gro_pull(skb, sizeof(*iph)); } skb_set_transport_header(skb, skb_gro_offset(skb)); NAPI_GRO_CB(skb)->proto = proto; flush--; nlen = skb_gro_offset(skb) - off; list_for_each_entry(p, head, list) { const struct ipv6hdr *iph2; __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ if (!NAPI_GRO_CB(p)->same_flow) continue; iph2 = (struct ipv6hdr *)(p->data + off); first_word = *(__be32 *)iph ^ *(__be32 *)iph2; /* All fields must match except length and Traffic Class. * XXX skbs on the gro_list have all been parsed and pulled * already so we don't need to compare nlen * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops))) * memcmp() alone below is sufficient, right? 
*/ if ((first_word & htonl(0xF00FFFFF)) || !ipv6_addr_equal(&iph->saddr, &iph2->saddr) || !ipv6_addr_equal(&iph->daddr, &iph2->daddr) || iph->nexthdr != iph2->nexthdr) { not_same_flow: NAPI_GRO_CB(p)->same_flow = 0; continue; } if (unlikely(nlen > sizeof(struct ipv6hdr))) { if (memcmp(iph + 1, iph2 + 1, nlen - sizeof(struct ipv6hdr))) goto not_same_flow; } } NAPI_GRO_CB(skb)->flush |= flush; skb_gro_postpull_rcsum(skb, iph, nlen); pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive, ops->callbacks.gro_receive, head, skb); out: skb_gro_flush_final(skb, pp, flush); return pp; } static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head, struct sk_buff *skb) { /* Common GRO receive for SIT and IP6IP6 */ if (NAPI_GRO_CB(skb)->encap_mark) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } NAPI_GRO_CB(skb)->encap_mark = 1; return ipv6_gro_receive(head, skb); } static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, struct sk_buff *skb) { /* Common GRO receive for SIT and IP6IP6 */ if (NAPI_GRO_CB(skb)->encap_mark) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } NAPI_GRO_CB(skb)->encap_mark = 1; return inet_gro_receive(head, skb); } INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { const struct net_offload *ops; struct ipv6hdr *iph; int err = -ENOSYS; u32 payload_len; if (skb->encapsulation) { skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6)); skb_set_inner_network_header(skb, nhoff); } payload_len = skb->len - nhoff - sizeof(*iph); if (unlikely(payload_len > IPV6_MAXPLEN)) { struct hop_jumbo_hdr *hop_jumbo; int hoplen = sizeof(*hop_jumbo); /* Move network header left */ memmove(skb_mac_header(skb) - hoplen, skb_mac_header(skb), skb->transport_header - skb->mac_header); skb->data -= hoplen; skb->len += hoplen; skb->mac_header -= hoplen; skb->network_header -= hoplen; iph = (struct ipv6hdr *)(skb->data + nhoff); hop_jumbo = (struct hop_jumbo_hdr *)(iph + 1); /* Build hop-by-hop options */ hop_jumbo->nexthdr = iph->nexthdr; hop_jumbo->hdrlen = 0; hop_jumbo->tlv_type = IPV6_TLV_JUMBO; hop_jumbo->tlv_len = 4; hop_jumbo->jumbo_payload_len = htonl(payload_len + hoplen); iph->nexthdr = NEXTHDR_HOP; iph->payload_len = 0; } else { iph = (struct ipv6hdr *)(skb->data + nhoff); iph->payload_len = htons(payload_len); } nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) goto out; err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete, udp6_gro_complete, skb, nhoff); out: return err; } static int sit_gro_complete(struct sk_buff *skb, int nhoff) { skb->encapsulation = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4; return ipv6_gro_complete(skb, nhoff); } static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff) { skb->encapsulation = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; return ipv6_gro_complete(skb, nhoff); } static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff) { skb->encapsulation = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; return inet_gro_complete(skb, nhoff); } static struct sk_buff *sit_gso_segment(struct sk_buff *skb, netdev_features_t features) { if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4)) return ERR_PTR(-EINVAL); return ipv6_gso_segment(skb, features); } static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb, netdev_features_t features) { if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6)) return ERR_PTR(-EINVAL); return inet_gso_segment(skb, features); } static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb, 
netdev_features_t features) { if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6)) return ERR_PTR(-EINVAL); return ipv6_gso_segment(skb, features); } static const struct net_offload sit_offload = { .callbacks = { .gso_segment = sit_gso_segment, .gro_receive = sit_ip6ip6_gro_receive, .gro_complete = sit_gro_complete, }, }; static const struct net_offload ip4ip6_offload = { .callbacks = { .gso_segment = ip4ip6_gso_segment, .gro_receive = ip4ip6_gro_receive, .gro_complete = ip4ip6_gro_complete, }, }; static const struct net_offload ip6ip6_offload = { .callbacks = { .gso_segment = ip6ip6_gso_segment, .gro_receive = sit_ip6ip6_gro_receive, .gro_complete = ip6ip6_gro_complete, }, }; static int __init ipv6_offload_init(void) { if (tcpv6_offload_init() < 0) pr_crit("%s: Cannot add TCP protocol offload\n", __func__); if (ipv6_exthdrs_offload_init() < 0) pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__); net_hotdata.ipv6_packet_offload = (struct packet_offload) { .type = cpu_to_be16(ETH_P_IPV6), .callbacks = { .gso_segment = ipv6_gso_segment, .gro_receive = ipv6_gro_receive, .gro_complete = ipv6_gro_complete, }, }; dev_add_offload(&net_hotdata.ipv6_packet_offload); inet_add_offload(&sit_offload, IPPROTO_IPV6); inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6); inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP); return 0; } fs_initcall(ipv6_offload_init);
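In ipv6_gro_receive() above, two packets can only be aggregated into the same flow if the first 32-bit word of their IPv6 headers matches under the mask htonl(0xF00FFFFF): Version and Flow Label must be equal, while the 8-bit Traffic Class in between is allowed to differ (addresses and next header are compared separately). Below is a small standalone sketch of that masked comparison; the header words are constructed by hand purely for illustration.

/* Standalone illustration of the GRO flow check on the first IPv6 word:
 * version (4 bits) and flow label (20 bits) must match, the traffic class
 * (8 bits in between) is ignored. Example values are made up. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ipv6_first_word(uint8_t tclass, uint32_t flow_label)
{
	/* <Version:4><Traffic_Class:8><Flow_Label:20>, in network byte order */
	return htonl((6u << 28) | ((uint32_t)tclass << 20) |
		     (flow_label & 0xFFFFF));
}

int main(void)
{
	uint32_t a = ipv6_first_word(0x00, 0x12345); /* traffic class 0x00 */
	uint32_t b = ipv6_first_word(0x2e, 0x12345); /* traffic class differs */
	uint32_t c = ipv6_first_word(0x00, 0x54321); /* flow label differs */

	/* Same test as ipv6_gro_receive(): XOR, then mask out traffic class. */
	printf("a vs b same flow: %s\n",
	       ((a ^ b) & htonl(0xF00FFFFF)) ? "no" : "yes"); /* yes */
	printf("a vs c same flow: %s\n",
	       ((a ^ c) & htonl(0xF00FFFFF)) ? "no" : "yes"); /* no */
	return 0;
}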
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for some monterey "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ /* */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } return rdesc; } #define mr_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int mr_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case 0x156: mr_map_key_clear(KEY_WORDPROCESSOR); break; case 0x157: mr_map_key_clear(KEY_SPREADSHEET); break; case 0x158: mr_map_key_clear(KEY_PRESENTATION); break; case 0x15c: mr_map_key_clear(KEY_STOP); break; default: return 0; } return 1; } static const struct hid_device_id mr_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, { } }; MODULE_DEVICE_TABLE(hid, mr_devices); static struct hid_driver mr_driver = { .name = "monterey", .id_table = mr_devices, .report_fixup = mr_report_fixup, .input_mapping = mr_input_mapping, }; module_hid_driver(mr_driver); MODULE_DESCRIPTION("HID driver for some monterey \"special\" devices"); MODULE_LICENSE("GPL");
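mr_report_fixup() above patches a single byte of the report descriptor: when offsets 29 and 30 hold 0x05 0x09 (Usage Page: Button), byte 30 is rewritten to 0x0c (Usage Page: Consumer) so the following usages land on the consumer-control page. Below is a minimal userspace sketch of the same byte-level fixup applied to a fabricated descriptor fragment; only offsets 29 and 30 mirror the check above, the rest is padding.

/* Standalone sketch of the hid-monterey descriptor fixup: rewrite the
 * Usage Page item at offsets 29/30 from Button (0x05 0x09) to Consumer
 * (0x05 0x0c). The rest of this descriptor fragment is placeholder bytes. */
#include <stddef.h>
#include <stdio.h>

static void fixup(unsigned char *rdesc, size_t rsize)
{
	if (rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
		printf("fixing up button/consumer usage page at offset 30\n");
		rdesc[30] = 0x0c;
	}
}

int main(void)
{
	unsigned char rdesc[32] = { 0 };

	rdesc[29] = 0x05;	/* Usage Page item prefix */
	rdesc[30] = 0x09;	/* Button page, as reported by the keyboard */

	fixup(rdesc, sizeof(rdesc));
	printf("rdesc[30] is now 0x%02x\n", rdesc[30]); /* 0x0c: Consumer */
	return 0;
}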
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * HID driver for NVIDIA SHIELD peripherals. */ #include <linux/hid.h> #include <linux/idr.h> #include <linux/input-event-codes.h> #include <linux/input.h> #include <linux/jiffies.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/power_supply.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/workqueue.h> #include "hid-ids.h" #define NOT_INIT_STR "NOT INITIALIZED" #define android_map_key(c) hid_map_usage(hi, usage, bit, max, EV_KEY, (c)) enum { HID_USAGE_ANDROID_PLAYPAUSE_BTN = 0xcd, /* Double-tap volume slider */ HID_USAGE_ANDROID_VOLUMEUP_BTN = 0xe9, HID_USAGE_ANDROID_VOLUMEDOWN_BTN = 0xea, HID_USAGE_ANDROID_SEARCH_BTN = 0x221, /* NVIDIA btn on Thunderstrike */ HID_USAGE_ANDROID_HOME_BTN = 0x223, HID_USAGE_ANDROID_BACK_BTN = 0x224, }; enum { SHIELD_FW_VERSION_INITIALIZED = 0, SHIELD_BOARD_INFO_INITIALIZED, SHIELD_BATTERY_STATS_INITIALIZED, SHIELD_CHARGER_STATE_INITIALIZED, }; enum { THUNDERSTRIKE_FW_VERSION_UPDATE = 0, THUNDERSTRIKE_BOARD_INFO_UPDATE, THUNDERSTRIKE_HAPTICS_UPDATE, THUNDERSTRIKE_LED_UPDATE, THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, }; enum { THUNDERSTRIKE_HOSTCMD_REPORT_SIZE = 33, THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID = 0x4, THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID = 0x3, }; enum { THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION = 1, THUNDERSTRIKE_HOSTCMD_ID_LED = 6, THUNDERSTRIKE_HOSTCMD_ID_BATTERY, THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO = 16, THUNDERSTRIKE_HOSTCMD_ID_USB_INIT = 53, THUNDERSTRIKE_HOSTCMD_ID_HAPTICS = 57, THUNDERSTRIKE_HOSTCMD_ID_CHARGER, }; struct power_supply_dev { struct power_supply *psy; struct power_supply_desc desc; }; struct thunderstrike_psy_prop_values { int voltage_min; int voltage_now; int voltage_avg; int voltage_boot; int capacity; int status; int charge_type; int temp; }; static const enum power_supply_property thunderstrike_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_VOLTAGE_BOOT, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MIN, POWER_SUPPLY_PROP_TEMP_MAX, POWER_SUPPLY_PROP_TEMP_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, }; enum thunderstrike_led_state { THUNDERSTRIKE_LED_OFF = 1, THUNDERSTRIKE_LED_ON = 
8, } __packed; static_assert(sizeof(enum thunderstrike_led_state) == 1); struct thunderstrike_hostcmd_battery { __le16 voltage_avg; u8 reserved_at_10; __le16 thermistor; __le16 voltage_min; __le16 voltage_boot; __le16 voltage_now; u8 capacity; } __packed; enum thunderstrike_charger_type { THUNDERSTRIKE_CHARGER_TYPE_NONE = 0, THUNDERSTRIKE_CHARGER_TYPE_TRICKLE, THUNDERSTRIKE_CHARGER_TYPE_NORMAL, } __packed; static_assert(sizeof(enum thunderstrike_charger_type) == 1); enum thunderstrike_charger_state { THUNDERSTRIKE_CHARGER_STATE_UNKNOWN = 0, THUNDERSTRIKE_CHARGER_STATE_DISABLED, THUNDERSTRIKE_CHARGER_STATE_CHARGING, THUNDERSTRIKE_CHARGER_STATE_FULL, THUNDERSTRIKE_CHARGER_STATE_FAILED = 8, } __packed; static_assert(sizeof(enum thunderstrike_charger_state) == 1); struct thunderstrike_hostcmd_charger { u8 connected; enum thunderstrike_charger_type type; enum thunderstrike_charger_state state; } __packed; struct thunderstrike_hostcmd_board_info { __le16 revision; __le16 serial[7]; } __packed; struct thunderstrike_hostcmd_haptics { u8 motor_left; u8 motor_right; } __packed; struct thunderstrike_hostcmd_resp_report { u8 report_id; /* THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID */ u8 cmd_id; u8 reserved_at_10; union { struct thunderstrike_hostcmd_board_info board_info; struct thunderstrike_hostcmd_haptics motors; __le16 fw_version; enum thunderstrike_led_state led_state; struct thunderstrike_hostcmd_battery battery; struct thunderstrike_hostcmd_charger charger; u8 payload[30]; } __packed; } __packed; static_assert(sizeof(struct thunderstrike_hostcmd_resp_report) == THUNDERSTRIKE_HOSTCMD_REPORT_SIZE); struct thunderstrike_hostcmd_req_report { u8 report_id; /* THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID */ u8 cmd_id; u8 reserved_at_10; union { struct __packed { u8 update; enum thunderstrike_led_state state; } led; struct __packed { u8 update; struct thunderstrike_hostcmd_haptics motors; } haptics; } __packed; u8 reserved_at_30[27]; } __packed; static_assert(sizeof(struct thunderstrike_hostcmd_req_report) == THUNDERSTRIKE_HOSTCMD_REPORT_SIZE); /* Common struct for shield accessories. */ struct shield_device { struct hid_device *hdev; struct power_supply_dev battery_dev; unsigned long initialized_flags; const char *codename; u16 fw_version; struct { u16 revision; char serial_number[15]; } board_info; }; /* * Non-trivial to uniquely identify Thunderstrike controllers at initialization * time. Use an ID allocator to help with this. 
*/ static DEFINE_IDA(thunderstrike_ida); struct thunderstrike { struct shield_device base; int id; /* Sub-devices */ struct input_dev *haptics_dev; struct led_classdev led_dev; /* Resources */ void *req_report_dmabuf; unsigned long update_flags; struct thunderstrike_hostcmd_haptics haptics_val; spinlock_t haptics_update_lock; u8 led_state : 1; enum thunderstrike_led_state led_value; struct thunderstrike_psy_prop_values psy_stats; spinlock_t psy_stats_lock; struct timer_list psy_stats_timer; struct work_struct hostcmd_req_work; }; static inline void thunderstrike_hostcmd_req_report_init( struct thunderstrike_hostcmd_req_report *report, u8 cmd_id) { memset(report, 0, sizeof(*report)); report->report_id = THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID; report->cmd_id = cmd_id; } static inline void shield_strrev(char *dest, size_t len, u16 rev) { dest[0] = ('A' - 1) + (rev >> 8); snprintf(&dest[1], len - 1, "%02X", 0xff & rev); } static struct input_dev *shield_allocate_input_dev(struct hid_device *hdev, const char *name_suffix) { struct input_dev *idev; idev = input_allocate_device(); if (!idev) goto err_device; idev->id.bustype = hdev->bus; idev->id.vendor = hdev->vendor; idev->id.product = hdev->product; idev->id.version = hdev->version; idev->uniq = hdev->uniq; idev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, name_suffix); if (!idev->name) goto err_name; input_set_drvdata(idev, hdev); return idev; err_name: input_free_device(idev); err_device: return ERR_PTR(-ENOMEM); } static struct input_dev *shield_haptics_create( struct shield_device *dev, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)) { struct input_dev *haptics; int ret; if (!IS_ENABLED(CONFIG_NVIDIA_SHIELD_FF)) return NULL; haptics = shield_allocate_input_dev(dev->hdev, "Haptics"); if (IS_ERR(haptics)) return haptics; input_set_capability(haptics, EV_FF, FF_RUMBLE); ret = input_ff_create_memless(haptics, NULL, play_effect); if (ret) goto err; ret = input_register_device(haptics); if (ret) goto err; return haptics; err: input_free_device(haptics); return ERR_PTR(ret); } static inline void thunderstrike_send_hostcmd_request(struct thunderstrike *ts) { struct thunderstrike_hostcmd_req_report *report = ts->req_report_dmabuf; struct shield_device *shield_dev = &ts->base; int ret; ret = hid_hw_raw_request(shield_dev->hdev, report->report_id, ts->req_report_dmabuf, THUNDERSTRIKE_HOSTCMD_REPORT_SIZE, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(shield_dev->hdev, "Failed to output Thunderstrike HOSTCMD request HID report due to %pe\n", ERR_PTR(ret)); } } static void thunderstrike_hostcmd_req_work_handler(struct work_struct *work) { struct thunderstrike *ts = container_of(work, struct thunderstrike, hostcmd_req_work); struct thunderstrike_hostcmd_req_report *report; unsigned long flags; report = ts->req_report_dmabuf; if (test_and_clear_bit(THUNDERSTRIKE_FW_VERSION_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_LED_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init(report, THUNDERSTRIKE_HOSTCMD_ID_LED); report->led.update = 1; report->led.state = ts->led_value; thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_BATTERY); thunderstrike_send_hostcmd_request(ts); 
thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_CHARGER); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_BOARD_INFO_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_HAPTICS_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_HAPTICS); report->haptics.update = 1; spin_lock_irqsave(&ts->haptics_update_lock, flags); report->haptics.motors = ts->haptics_val; spin_unlock_irqrestore(&ts->haptics_update_lock, flags); thunderstrike_send_hostcmd_request(ts); } } static inline void thunderstrike_request_firmware_version(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_FW_VERSION_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static inline void thunderstrike_request_board_info(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_BOARD_INFO_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static inline int thunderstrike_update_haptics(struct thunderstrike *ts, struct thunderstrike_hostcmd_haptics *motors) { unsigned long flags; spin_lock_irqsave(&ts->haptics_update_lock, flags); ts->haptics_val = *motors; spin_unlock_irqrestore(&ts->haptics_update_lock, flags); set_bit(THUNDERSTRIKE_HAPTICS_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); return 0; } static int thunderstrike_play_effect(struct input_dev *idev, void *data, struct ff_effect *effect) { struct hid_device *hdev = input_get_drvdata(idev); struct thunderstrike_hostcmd_haptics motors; struct shield_device *shield_dev; struct thunderstrike *ts; if (effect->type != FF_RUMBLE) return 0; shield_dev = hid_get_drvdata(hdev); ts = container_of(shield_dev, struct thunderstrike, base); /* Thunderstrike motor values range from 0 to 32 inclusively */ motors.motor_left = effect->u.rumble.strong_magnitude / 2047; motors.motor_right = effect->u.rumble.weak_magnitude / 2047; hid_dbg(hdev, "Thunderstrike FF_RUMBLE request, left: %u right: %u\n", motors.motor_left, motors.motor_right); return thunderstrike_update_haptics(ts, &motors); } static enum led_brightness thunderstrike_led_get_brightness(struct led_classdev *led) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct shield_device *shield_dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(shield_dev, struct thunderstrike, base); return ts->led_state; } static void thunderstrike_led_set_brightness(struct led_classdev *led, enum led_brightness value) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct shield_device *shield_dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(shield_dev, struct thunderstrike, base); switch (value) { case LED_OFF: ts->led_value = THUNDERSTRIKE_LED_OFF; break; default: ts->led_value = THUNDERSTRIKE_LED_ON; break; } set_bit(THUNDERSTRIKE_LED_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static int thunderstrike_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct shield_device *shield_dev = power_supply_get_drvdata(psy); struct thunderstrike_psy_prop_values prop_values; struct thunderstrike *ts; int ret = 0; ts = container_of(shield_dev, struct thunderstrike, base); spin_lock(&ts->psy_stats_lock); prop_values = ts->psy_stats; spin_unlock(&ts->psy_stats_lock); switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval 
= prop_values.status; break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = prop_values.charge_type; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN: val->intval = prop_values.voltage_min; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: val->intval = 2900000; /* 2.9 V */ break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = 2200000; /* 2.2 V */ break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = prop_values.voltage_now; break; case POWER_SUPPLY_PROP_VOLTAGE_AVG: val->intval = prop_values.voltage_avg; break; case POWER_SUPPLY_PROP_VOLTAGE_BOOT: val->intval = prop_values.voltage_boot; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = prop_values.capacity; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_TEMP: val->intval = prop_values.temp; break; case POWER_SUPPLY_PROP_TEMP_MIN: val->intval = 0; /* 0 C */ break; case POWER_SUPPLY_PROP_TEMP_MAX: val->intval = 400; /* 40 C */ break; case POWER_SUPPLY_PROP_TEMP_ALERT_MIN: val->intval = 15; /* 1.5 C */ break; case POWER_SUPPLY_PROP_TEMP_ALERT_MAX: val->intval = 380; /* 38 C */ break; default: ret = -EINVAL; break; } return ret; } static inline void thunderstrike_request_psy_stats(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static void thunderstrike_psy_stats_timer_handler(struct timer_list *timer) { struct thunderstrike *ts = container_of(timer, struct thunderstrike, psy_stats_timer); thunderstrike_request_psy_stats(ts); /* Query battery statistics from device every five minutes */ mod_timer(timer, jiffies + 300 * HZ); } static void thunderstrike_parse_fw_version_payload(struct shield_device *shield_dev, __le16 fw_version) { shield_dev->fw_version = le16_to_cpu(fw_version); set_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(shield_dev->hdev, "Thunderstrike firmware version 0x%04X\n", shield_dev->fw_version); } static void thunderstrike_parse_board_info_payload(struct shield_device *shield_dev, struct thunderstrike_hostcmd_board_info *board_info) { char board_revision_str[4]; int i; shield_dev->board_info.revision = le16_to_cpu(board_info->revision); for (i = 0; i < 7; ++i) { u16 val = le16_to_cpu(board_info->serial[i]); shield_dev->board_info.serial_number[2 * i] = val & 0xFF; shield_dev->board_info.serial_number[2 * i + 1] = val >> 8; } shield_dev->board_info.serial_number[14] = '\0'; set_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags); shield_strrev(board_revision_str, 4, shield_dev->board_info.revision); hid_dbg(shield_dev->hdev, "Thunderstrike BOARD_REVISION_%s (0x%04X) S/N: %s\n", board_revision_str, shield_dev->board_info.revision, shield_dev->board_info.serial_number); } static inline void thunderstrike_parse_haptics_payload(struct shield_device *shield_dev, struct thunderstrike_hostcmd_haptics *haptics) { hid_dbg(shield_dev->hdev, "Thunderstrike haptics HOSTCMD response, left: %u right: %u\n", haptics->motor_left, haptics->motor_right); } static void thunderstrike_parse_led_payload(struct shield_device *shield_dev, enum thunderstrike_led_state led_state) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); switch (led_state) { case THUNDERSTRIKE_LED_OFF: ts->led_state = 0; break; case THUNDERSTRIKE_LED_ON: ts->led_state = 1; break; } hid_dbg(shield_dev->hdev, "Thunderstrike led HOSTCMD response, 0x%02X\n", led_state); } static void 
thunderstrike_parse_battery_payload( struct shield_device *shield_dev, struct thunderstrike_hostcmd_battery *battery) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); u16 hostcmd_voltage_boot = le16_to_cpu(battery->voltage_boot); u16 hostcmd_voltage_avg = le16_to_cpu(battery->voltage_avg); u16 hostcmd_voltage_min = le16_to_cpu(battery->voltage_min); u16 hostcmd_voltage_now = le16_to_cpu(battery->voltage_now); u16 hostcmd_thermistor = le16_to_cpu(battery->thermistor); int voltage_boot, voltage_avg, voltage_min, voltage_now; struct hid_device *hdev = shield_dev->hdev; u8 capacity = battery->capacity; int temp; /* Convert thunderstrike device values to µV and tenths of degree Celsius */ voltage_boot = hostcmd_voltage_boot * 1000; voltage_avg = hostcmd_voltage_avg * 1000; voltage_min = hostcmd_voltage_min * 1000; voltage_now = hostcmd_voltage_now * 1000; temp = (1378 - (int)hostcmd_thermistor) * 10 / 19; /* Copy converted values */ spin_lock(&ts->psy_stats_lock); ts->psy_stats.voltage_boot = voltage_boot; ts->psy_stats.voltage_avg = voltage_avg; ts->psy_stats.voltage_min = voltage_min; ts->psy_stats.voltage_now = voltage_now; ts->psy_stats.capacity = capacity; ts->psy_stats.temp = temp; spin_unlock(&ts->psy_stats_lock); set_bit(SHIELD_BATTERY_STATS_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, voltage_avg: %u voltage_now: %u\n", hostcmd_voltage_avg, hostcmd_voltage_now); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, voltage_boot: %u voltage_min: %u\n", hostcmd_voltage_boot, hostcmd_voltage_min); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, thermistor: %u\n", hostcmd_thermistor); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, capacity: %u%%\n", capacity); } static void thunderstrike_parse_charger_payload( struct shield_device *shield_dev, struct thunderstrike_hostcmd_charger *charger) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); int charge_type = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; struct hid_device *hdev = shield_dev->hdev; int status = POWER_SUPPLY_STATUS_UNKNOWN; switch (charger->type) { case THUNDERSTRIKE_CHARGER_TYPE_NONE: charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE; break; case THUNDERSTRIKE_CHARGER_TYPE_TRICKLE: charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; break; case THUNDERSTRIKE_CHARGER_TYPE_NORMAL: charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; break; default: hid_warn(hdev, "Unhandled Thunderstrike charger HOSTCMD type, %u\n", charger->type); break; } switch (charger->state) { case THUNDERSTRIKE_CHARGER_STATE_UNKNOWN: status = POWER_SUPPLY_STATUS_UNKNOWN; break; case THUNDERSTRIKE_CHARGER_STATE_DISABLED: /* Indicates charger is disconnected */ break; case THUNDERSTRIKE_CHARGER_STATE_CHARGING: status = POWER_SUPPLY_STATUS_CHARGING; break; case THUNDERSTRIKE_CHARGER_STATE_FULL: status = POWER_SUPPLY_STATUS_FULL; break; case THUNDERSTRIKE_CHARGER_STATE_FAILED: status = POWER_SUPPLY_STATUS_NOT_CHARGING; hid_err(hdev, "Thunderstrike device failed to charge\n"); break; default: hid_warn(hdev, "Unhandled Thunderstrike charger HOSTCMD state, %u\n", charger->state); break; } if (!charger->connected) status = POWER_SUPPLY_STATUS_DISCHARGING; spin_lock(&ts->psy_stats_lock); ts->psy_stats.charge_type = charge_type; ts->psy_stats.status = status; spin_unlock(&ts->psy_stats_lock); set_bit(SHIELD_CHARGER_STATE_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(hdev, "Thunderstrike charger HOSTCMD response, connected: %u, type: %u, state: 
%u\n", charger->connected, charger->type, charger->state); } static inline void thunderstrike_device_init_info(struct shield_device *shield_dev) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); if (!test_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_request_firmware_version(ts); if (!test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_request_board_info(ts); if (!test_bit(SHIELD_BATTERY_STATS_INITIALIZED, &shield_dev->initialized_flags) || !test_bit(SHIELD_CHARGER_STATE_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_psy_stats_timer_handler(&ts->psy_stats_timer); } static int thunderstrike_parse_report(struct shield_device *shield_dev, struct hid_report *report, u8 *data, int size) { struct thunderstrike_hostcmd_resp_report *hostcmd_resp_report; struct hid_device *hdev = shield_dev->hdev; switch (report->id) { case THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID: if (size != THUNDERSTRIKE_HOSTCMD_REPORT_SIZE) { hid_err(hdev, "Encountered Thunderstrike HOSTCMD HID report with unexpected size %d\n", size); return -EINVAL; } hostcmd_resp_report = (struct thunderstrike_hostcmd_resp_report *)data; switch (hostcmd_resp_report->cmd_id) { case THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION: thunderstrike_parse_fw_version_payload( shield_dev, hostcmd_resp_report->fw_version); break; case THUNDERSTRIKE_HOSTCMD_ID_LED: thunderstrike_parse_led_payload(shield_dev, hostcmd_resp_report->led_state); break; case THUNDERSTRIKE_HOSTCMD_ID_BATTERY: thunderstrike_parse_battery_payload(shield_dev, &hostcmd_resp_report->battery); break; case THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO: thunderstrike_parse_board_info_payload( shield_dev, &hostcmd_resp_report->board_info); break; case THUNDERSTRIKE_HOSTCMD_ID_HAPTICS: thunderstrike_parse_haptics_payload( shield_dev, &hostcmd_resp_report->motors); break; case THUNDERSTRIKE_HOSTCMD_ID_USB_INIT: /* May block HOSTCMD requests till received initially */ thunderstrike_device_init_info(shield_dev); break; case THUNDERSTRIKE_HOSTCMD_ID_CHARGER: /* May block HOSTCMD requests till received initially */ thunderstrike_device_init_info(shield_dev); thunderstrike_parse_charger_payload( shield_dev, &hostcmd_resp_report->charger); break; default: hid_warn(hdev, "Unhandled Thunderstrike HOSTCMD id %d\n", hostcmd_resp_report->cmd_id); return -ENOENT; } break; default: return 0; } return 0; } static inline int thunderstrike_led_create(struct thunderstrike *ts) { struct led_classdev *led = &ts->led_dev; led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike%d:blue:led", ts->id); if (!led->name) return -ENOMEM; led->max_brightness = 1; led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN; led->brightness_get = &thunderstrike_led_get_brightness; led->brightness_set = &thunderstrike_led_set_brightness; return led_classdev_register(&ts->base.hdev->dev, led); } static inline int thunderstrike_psy_create(struct shield_device *shield_dev) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); struct power_supply_config psy_cfg = { .drv_data = shield_dev, }; struct hid_device *hdev = shield_dev->hdev; int ret; /* * Set an initial capacity and temperature value to avoid prematurely * triggering alerts. Will be replaced by values queried from initial * HOSTCMD requests. 
*/ ts->psy_stats.capacity = 100; ts->psy_stats.temp = 182; shield_dev->battery_dev.desc.properties = thunderstrike_battery_props; shield_dev->battery_dev.desc.num_properties = ARRAY_SIZE(thunderstrike_battery_props); shield_dev->battery_dev.desc.get_property = thunderstrike_battery_get_property; shield_dev->battery_dev.desc.type = POWER_SUPPLY_TYPE_BATTERY; shield_dev->battery_dev.desc.name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike_%d", ts->id); if (!shield_dev->battery_dev.desc.name) return -ENOMEM; shield_dev->battery_dev.psy = power_supply_register( &hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg); if (IS_ERR(shield_dev->battery_dev.psy)) { hid_err(hdev, "Failed to register Thunderstrike battery device\n"); return PTR_ERR(shield_dev->battery_dev.psy); } ret = power_supply_powers(shield_dev->battery_dev.psy, &hdev->dev); if (ret) { hid_err(hdev, "Failed to associate battery device to Thunderstrike\n"); goto err; } return 0; err: power_supply_unregister(shield_dev->battery_dev.psy); return ret; } static struct shield_device *thunderstrike_create(struct hid_device *hdev) { struct shield_device *shield_dev; struct thunderstrike *ts; int ret; ts = devm_kzalloc(&hdev->dev, sizeof(*ts), GFP_KERNEL); if (!ts) return ERR_PTR(-ENOMEM); ts->req_report_dmabuf = devm_kzalloc( &hdev->dev, THUNDERSTRIKE_HOSTCMD_REPORT_SIZE, GFP_KERNEL); if (!ts->req_report_dmabuf) return ERR_PTR(-ENOMEM); shield_dev = &ts->base; shield_dev->hdev = hdev; shield_dev->codename = "Thunderstrike"; spin_lock_init(&ts->haptics_update_lock); spin_lock_init(&ts->psy_stats_lock); INIT_WORK(&ts->hostcmd_req_work, thunderstrike_hostcmd_req_work_handler); hid_set_drvdata(hdev, shield_dev); ts->id = ida_alloc(&thunderstrike_ida, GFP_KERNEL); if (ts->id < 0) return ERR_PTR(ts->id); ts->haptics_dev = shield_haptics_create(shield_dev, thunderstrike_play_effect); if (IS_ERR(ts->haptics_dev)) { hid_err(hdev, "Failed to create Thunderstrike haptics instance\n"); ret = PTR_ERR(ts->haptics_dev); goto err_id; } ret = thunderstrike_psy_create(shield_dev); if (ret) { hid_err(hdev, "Failed to create Thunderstrike power supply instance\n"); goto err_haptics; } ret = thunderstrike_led_create(ts); if (ret) { hid_err(hdev, "Failed to create Thunderstrike LED instance\n"); goto err_psy; } timer_setup(&ts->psy_stats_timer, thunderstrike_psy_stats_timer_handler, 0); hid_info(hdev, "Registered Thunderstrike controller\n"); return shield_dev; err_psy: power_supply_unregister(shield_dev->battery_dev.psy); err_haptics: if (ts->haptics_dev) input_unregister_device(ts->haptics_dev); err_id: ida_free(&thunderstrike_ida, ts->id); return ERR_PTR(ret); } static void thunderstrike_destroy(struct thunderstrike *ts) { led_classdev_unregister(&ts->led_dev); power_supply_unregister(ts->base.battery_dev.psy); if (ts->haptics_dev) input_unregister_device(ts->haptics_dev); ida_free(&thunderstrike_ida, ts->id); } static int android_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case HID_USAGE_ANDROID_PLAYPAUSE_BTN: android_map_key(KEY_PLAYPAUSE); break; case HID_USAGE_ANDROID_VOLUMEUP_BTN: android_map_key(KEY_VOLUMEUP); break; case HID_USAGE_ANDROID_VOLUMEDOWN_BTN: android_map_key(KEY_VOLUMEDOWN); break; case HID_USAGE_ANDROID_SEARCH_BTN: android_map_key(BTN_Z); break; case HID_USAGE_ANDROID_HOME_BTN: android_map_key(BTN_MODE); break; case 
HID_USAGE_ANDROID_BACK_BTN: android_map_key(BTN_SELECT); break; default: return 0; } return 1; } static ssize_t firmware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags)) ret = sysfs_emit(buf, "0x%04X\n", shield_dev->fw_version); else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(firmware_version); static ssize_t hardware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; char board_revision_str[4]; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) { shield_strrev(board_revision_str, 4, shield_dev->board_info.revision); ret = sysfs_emit(buf, "%s BOARD_REVISION_%s (0x%04X)\n", shield_dev->codename, board_revision_str, shield_dev->board_info.revision); } else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(hardware_version); static ssize_t serial_number_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) ret = sysfs_emit(buf, "%s\n", shield_dev->board_info.serial_number); else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(serial_number); static struct attribute *shield_device_attrs[] = { &dev_attr_firmware_version.attr, &dev_attr_hardware_version.attr, &dev_attr_serial_number.attr, NULL, }; ATTRIBUTE_GROUPS(shield_device); static int shield_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct shield_device *dev = hid_get_drvdata(hdev); return thunderstrike_parse_report(dev, report, data, size); } static int shield_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct shield_device *shield_dev = NULL; struct thunderstrike *ts; int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "Parse failed\n"); return ret; } switch (id->product) { case USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER: shield_dev = thunderstrike_create(hdev); break; } if (unlikely(!shield_dev)) { hid_err(hdev, "Failed to identify SHIELD device\n"); return -ENODEV; } if (IS_ERR(shield_dev)) { hid_err(hdev, "Failed to create SHIELD device\n"); return PTR_ERR(shield_dev); } ts = container_of(shield_dev, struct thunderstrike, base); ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT); if (ret) { hid_err(hdev, "Failed to start HID device\n"); goto err_ts_create; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "Failed to open HID device\n"); goto err_stop; } thunderstrike_device_init_info(shield_dev); return ret; err_stop: hid_hw_stop(hdev); err_ts_create: thunderstrike_destroy(ts); return ret; } static void shield_remove(struct hid_device *hdev) { struct shield_device *dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(dev, struct thunderstrike, base); hid_hw_close(hdev); thunderstrike_destroy(ts); del_timer_sync(&ts->psy_stats_timer); cancel_work_sync(&ts->hostcmd_req_work); hid_hw_stop(hdev); } static const struct hid_device_id shield_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NVIDIA, USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_NVIDIA, USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER) }, { } }; MODULE_DEVICE_TABLE(hid, shield_devices); static struct hid_driver shield_driver = { .name = "shield", .id_table = shield_devices, .input_mapping = android_input_mapping, .probe = shield_probe, .remove = shield_remove, .raw_event = shield_raw_event, .driver = { .dev_groups = shield_device_groups, }, }; module_hid_driver(shield_driver); MODULE_AUTHOR("Rahul Rameshbabu <rrameshbabu@nvidia.com>"); MODULE_DESCRIPTION("HID Driver for NVIDIA SHIELD peripherals."); MODULE_LICENSE("GPL");
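Two small conversions in the driver above are easy to check in isolation: shield_strrev() turns a 16-bit board revision into a short string (the high byte selects a letter starting at 'A' for 0x01, the low byte is printed as two hex digits), and thunderstrike_parse_battery_payload() maps the raw thermistor reading to tenths of a degree Celsius with (1378 - raw) * 10 / 19. The standalone sketch below replays both conversions on example inputs; the sample revision and thermistor values are invented for illustration.

/* Standalone replay of two conversions from hid-nvidia-shield:
 * board-revision string formatting and thermistor -> 0.1 degC units.
 * Input values below are arbitrary examples. */
#include <stdint.h>
#include <stdio.h>

static void strrev_like(char *dest, size_t len, uint16_t rev)
{
	/* High byte 0x01 -> 'A', 0x02 -> 'B', ...; low byte as two hex digits */
	dest[0] = ('A' - 1) + (rev >> 8);
	snprintf(&dest[1], len - 1, "%02X", 0xff & rev);
}

static int thermistor_to_decidegc(uint16_t raw)
{
	/* Same linear mapping as the battery payload parser above */
	return (1378 - (int)raw) * 10 / 19;
}

int main(void)
{
	char rev_str[4];

	strrev_like(rev_str, sizeof(rev_str), 0x0203);
	printf("revision 0x0203 -> BOARD_REVISION_%s\n", rev_str); /* B03 */

	/* e.g. raw reading 998 -> 200 -> 20.0 degrees Celsius */
	printf("thermistor 998 -> %d (tenths of a degree C)\n",
	       thermistor_to_decidegc(998));
	return 0;
}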
/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __SOUND_INFO_H #define __SOUND_INFO_H /* * Header file for info interface * Copyright (c) by Jaroslav Kysela <perex@perex.cz> */ #include <linux/poll.h> #include <linux/seq_file.h> #include <sound/core.h> /* buffer for information */ struct snd_info_buffer { char *buffer; /* pointer to begin of buffer */ unsigned int curr; /* current position in buffer */ unsigned int size; /* current size */ unsigned int len; /* total length of buffer */ int stop; /* stop flag */ int error; /* error code */ }; #define SNDRV_INFO_CONTENT_TEXT 0 #define SNDRV_INFO_CONTENT_DATA 1 struct snd_info_entry; struct snd_info_entry_text { void (*read)(struct snd_info_entry *entry, struct snd_info_buffer *buffer); void (*write)(struct snd_info_entry *entry, struct snd_info_buffer *buffer); }; struct snd_info_entry_ops { int (*open)(struct snd_info_entry *entry, unsigned short mode, void **file_private_data); int (*release)(struct snd_info_entry *entry, unsigned short mode, void *file_private_data); ssize_t (*read)(struct snd_info_entry *entry, void *file_private_data, struct file *file, char __user *buf, size_t count, loff_t pos); ssize_t (*write)(struct snd_info_entry *entry, void *file_private_data, struct file *file, const char __user *buf, size_t count, loff_t pos); loff_t (*llseek)(struct snd_info_entry *entry, void *file_private_data, struct file *file, loff_t offset, int orig); __poll_t (*poll)(struct snd_info_entry *entry, void *file_private_data, struct file *file, poll_table *wait); int (*ioctl)(struct snd_info_entry *entry, void *file_private_data, struct file *file, unsigned int cmd, unsigned long arg); int (*mmap)(struct snd_info_entry *entry, void *file_private_data, struct inode *inode, struct file *file, struct vm_area_struct *vma); }; struct snd_info_entry { const char *name; umode_t mode; long size; unsigned short content; union { struct snd_info_entry_text text; const struct snd_info_entry_ops *ops; } c; struct snd_info_entry *parent; struct module *module; void *private_data; void (*private_free)(struct snd_info_entry *entry); struct proc_dir_entry *p; struct mutex access; struct list_head children; struct list_head list; }; #if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_SND_PROC_FS) int snd_info_minor_register(void); #else #define snd_info_minor_register() 0 #endif #ifdef CONFIG_SND_PROC_FS extern struct snd_info_entry *snd_seq_root; #ifdef CONFIG_SND_OSSEMUL extern struct snd_info_entry *snd_oss_root; void snd_card_info_read_oss(struct snd_info_buffer *buffer); #else #define snd_oss_root NULL static inline void snd_card_info_read_oss(struct snd_info_buffer *buffer) {} #endif /** * snd_iprintf - printf on the procfs buffer * @buf: 
the procfs buffer * @fmt: the printf format * * Outputs the string on the procfs buffer just like printf(). * * Return: zero for success, or a negative error code. */ #define snd_iprintf(buf, fmt, args...) \ seq_printf((struct seq_file *)(buf)->buffer, fmt, ##args) int snd_info_init(void); int snd_info_done(void); int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len); const char *snd_info_get_str(char *dest, const char *src, int len); struct snd_info_entry *snd_info_create_module_entry(struct module *module, const char *name, struct snd_info_entry *parent); struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card, const char *name, struct snd_info_entry *parent); void snd_info_free_entry(struct snd_info_entry *entry); int snd_info_card_create(struct snd_card *card); int snd_info_card_register(struct snd_card *card); int snd_info_card_free(struct snd_card *card); void snd_info_card_disconnect(struct snd_card *card); void snd_info_card_id_change(struct snd_card *card); int snd_info_register(struct snd_info_entry *entry); /* for card drivers */ static inline int snd_card_proc_new(struct snd_card *card, const char *name, struct snd_info_entry **entryp) { *entryp = snd_info_create_card_entry(card, name, card->proc_root); return *entryp ? 0 : -ENOMEM; } static inline void snd_info_set_text_ops(struct snd_info_entry *entry, void *private_data, void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) { entry->private_data = private_data; entry->c.text.read = read; } int snd_card_rw_proc_new(struct snd_card *card, const char *name, void *private_data, void (*read)(struct snd_info_entry *, struct snd_info_buffer *), void (*write)(struct snd_info_entry *entry, struct snd_info_buffer *buffer)); int snd_info_check_reserved_words(const char *str); #else #define snd_seq_root NULL #define snd_oss_root NULL static inline int snd_iprintf(struct snd_info_buffer *buffer, char *fmt, ...) 
{ return 0; } static inline int snd_info_init(void) { return 0; } static inline int snd_info_done(void) { return 0; } static inline int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len) { return 0; } static inline char *snd_info_get_str(char *dest, char *src, int len) { return NULL; } static inline struct snd_info_entry *snd_info_create_module_entry(struct module *module, const char *name, struct snd_info_entry *parent) { return NULL; } static inline struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card, const char *name, struct snd_info_entry *parent) { return NULL; } static inline void snd_info_free_entry(struct snd_info_entry *entry) { ; } static inline int snd_info_card_create(struct snd_card *card) { return 0; } static inline int snd_info_card_register(struct snd_card *card) { return 0; } static inline int snd_info_card_free(struct snd_card *card) { return 0; } static inline void snd_info_card_disconnect(struct snd_card *card) { } static inline void snd_info_card_id_change(struct snd_card *card) { } static inline int snd_info_register(struct snd_info_entry *entry) { return 0; } static inline int snd_card_proc_new(struct snd_card *card, const char *name, struct snd_info_entry **entryp) { return -EINVAL; } static inline void snd_info_set_text_ops(struct snd_info_entry *entry __attribute__((unused)), void *private_data, void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) {} static inline int snd_card_rw_proc_new(struct snd_card *card, const char *name, void *private_data, void (*read)(struct snd_info_entry *, struct snd_info_buffer *), void (*write)(struct snd_info_entry *entry, struct snd_info_buffer *buffer)) { return 0; } static inline int snd_info_check_reserved_words(const char *str) { return 1; } #endif /** * snd_card_ro_proc_new - Create a read-only text proc file entry for the card * @card: the card instance * @name: the file name * @private_data: the arbitrary private data * @read: the read callback * * This proc file entry will be registered via snd_card_register() call, and * it will be removed automatically at the card removal, too. */ static inline int snd_card_ro_proc_new(struct snd_card *card, const char *name, void *private_data, void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) { return snd_card_rw_proc_new(card, name, private_data, read, NULL); } /* * OSS info part */ #if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_SND_PROC_FS) #define SNDRV_OSS_INFO_DEV_AUDIO 0 #define SNDRV_OSS_INFO_DEV_SYNTH 1 #define SNDRV_OSS_INFO_DEV_MIDI 2 #define SNDRV_OSS_INFO_DEV_TIMERS 4 #define SNDRV_OSS_INFO_DEV_MIXERS 5 #define SNDRV_OSS_INFO_DEV_COUNT 6 int snd_oss_info_register(int dev, int num, char *string); #define snd_oss_info_unregister(dev, num) snd_oss_info_register(dev, num, NULL) #endif /* CONFIG_SND_OSSEMUL && CONFIG_SND_PROC_FS */ #endif /* __SOUND_INFO_H */
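/*
 * Usage sketch (not part of the original header): how a card driver might
 * expose a read-only text file under /proc/asound/cardN/ using the helpers
 * declared above. The "my_chip" structure, the file name and the function
 * names are hypothetical; only the calling conventions of
 * snd_card_ro_proc_new() and snd_iprintf() are taken from the header.
 */
#include <sound/core.h>
#include <sound/info.h>

struct my_chip {                        /* hypothetical driver state */
	int irq;
	unsigned long port;
};

static void my_chip_proc_read(struct snd_info_entry *entry,
			      struct snd_info_buffer *buffer)
{
	struct my_chip *chip = entry->private_data;

	/* Each snd_iprintf() appends formatted text to the procfs buffer. */
	snd_iprintf(buffer, "irq  : %d\n", chip->irq);
	snd_iprintf(buffer, "port : 0x%lx\n", chip->port);
}

static int my_chip_create_proc(struct snd_card *card, struct my_chip *chip)
{
	/*
	 * The entry is registered during snd_card_register() and removed
	 * automatically when the card is freed.
	 */
	return snd_card_ro_proc_new(card, "my-chip-status", chip,
				    my_chip_proc_read);
}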
// SPDX-License-Identifier: GPL-2.0-only /* * Transparent proxy support for Linux/iptables * * Copyright (c) 2006-2010 BalaBit IT Ltd. * Author: Balazs Scheidler, Krisztian Kovacs */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <net/checksum.h> #include <net/udp.h> #include <net/tcp.h> #include <net/inet_sock.h> #include <net/inet_hashtables.h> #include <linux/inetdevice.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/ipv4/nf_defrag_ipv4.h> #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) #define XT_TPROXY_HAVE_IPV6 1 #include <net/if_inet6.h> #include <net/addrconf.h> #include <net/inet6_hashtables.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> #endif #include <net/netfilter/nf_tproxy.h> #include <linux/netfilter/xt_TPROXY.h> static unsigned int tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, u_int32_t mark_mask, u_int32_t mark_value) { const struct iphdr *iph = ip_hdr(skb); struct udphdr _hdr, *hp; struct sock *sk; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) return NF_DROP; /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol, iph->saddr, iph->daddr, hp->source, hp->dest, skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED); laddr = nf_tproxy_laddr4(skb, laddr, iph->daddr); if (!lport) lport = hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = nf_tproxy_handle_time_wait4(net, skb, laddr, lport, sk); else if (!sk) /* no, there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol, iph->saddr, laddr, hp->source, lport, skb->dev, NF_TPROXY_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && nf_tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~mark_mask) ^ mark_value; nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } return NF_DROP; } static unsigned int tproxy_tg4_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info *tgi =
par->targinfo; return tproxy_tg4(xt_net(par), skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value); } static unsigned int tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; return tproxy_tg4(xt_net(par), skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); } #ifdef XT_TPROXY_HAVE_IPV6 static unsigned int tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct ipv6hdr *iph = ipv6_hdr(skb); const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; struct udphdr _hdr, *hp; struct sock *sk; const struct in6_addr *laddr; __be16 lport; int thoff = 0; int tproto; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) return NF_DROP; hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (!hp) return NF_DROP; /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto, &iph->saddr, &iph->daddr, hp->source, hp->dest, xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED); laddr = nf_tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr); lport = tgi->lport ? tgi->lport : hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) { const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; /* reopening a TIME_WAIT connection needs special handling */ sk = nf_tproxy_handle_time_wait6(skb, tproto, thoff, xt_net(par), &tgi->laddr.in6, tgi->lport, sk); } else if (!sk) /* no there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto, &iph->saddr, laddr, hp->source, lport, xt_in(par), NF_TPROXY_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && nf_tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } return NF_DROP; } static int tproxy_tg6_check(const struct xt_tgchk_param *par) { const struct ip6t_ip6 *i = par->entryinfo; int err; err = nf_defrag_ipv6_enable(par->net); if (err) return err; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->invflags & IP6T_INV_PROTO)) return 0; pr_info_ratelimited("Can be used only with -p tcp or -p udp\n"); return -EINVAL; } static void tproxy_tg6_destroy(const struct xt_tgdtor_param *par) { nf_defrag_ipv6_disable(par->net); } #endif static int tproxy_tg4_check(const struct xt_tgchk_param *par) { const struct ipt_ip *i = par->entryinfo; int err; err = nf_defrag_ipv4_enable(par->net); if (err) return err; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->invflags & IPT_INV_PROTO)) return 0; pr_info_ratelimited("Can be used only with -p tcp or -p udp\n"); return -EINVAL; } static void tproxy_tg4_destroy(const struct xt_tgdtor_param *par) { nf_defrag_ipv4_disable(par->net); } static struct xt_target tproxy_tg_reg[] __read_mostly = { { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v0, .revision = 0, .targetsize = sizeof(struct xt_tproxy_target_info), .checkentry = tproxy_tg4_check, .destroy = tproxy_tg4_destroy, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", 
.target = tproxy_tg4_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg4_check, .destroy = tproxy_tg4_destroy, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #ifdef XT_TPROXY_HAVE_IPV6 { .name = "TPROXY", .family = NFPROTO_IPV6, .table = "mangle", .target = tproxy_tg6_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg6_check, .destroy = tproxy_tg6_destroy, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #endif }; static int __init tproxy_tg_init(void) { return xt_register_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } static void __exit tproxy_tg_exit(void) { xt_unregister_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } module_init(tproxy_tg_init); module_exit(tproxy_tg_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Balazs Scheidler, Krisztian Kovacs"); MODULE_DESCRIPTION("Netfilter transparent proxy (TPROXY) target module."); MODULE_ALIAS("ipt_TPROXY"); MODULE_ALIAS("ip6t_TPROXY");
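/*
 * Userspace sketch (not part of the module above): the TPROXY target only
 * diverts packets to sockets that nf_tproxy_sk_is_transparent() accepts, so a
 * proxy process typically opens its listener with IP_TRANSPARENT set, roughly
 * as below. The function name is hypothetical, error handling is abbreviated,
 * the option requires CAP_NET_ADMIN, and IP_TRANSPARENT is assumed to be
 * exposed by the libc headers (otherwise it comes from <linux/in.h>).
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int make_tproxy_listener(uint16_t port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* Mark the socket transparent so TPROXY-diverted packets match it. */
	if (setsockopt(fd, IPPROTO_IP, IP_TRANSPARENT, &one, sizeof(one)) < 0)
		goto err;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;
	if (listen(fd, SOMAXCONN) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}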
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2015, Heiner Kallweit <hkallweit1@gmail.com> */ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "leds.h" DEFINE_LED_TRIGGER(bt_power_led_trigger); struct hci_basic_led_trigger { struct led_trigger led_trigger; struct hci_dev *hdev; }; #define to_hci_basic_led_trigger(arg) container_of(arg, \ struct hci_basic_led_trigger, led_trigger) void hci_leds_update_powered(struct hci_dev *hdev, bool enabled) { if (hdev->power_led) led_trigger_event(hdev->power_led, enabled ? LED_FULL : LED_OFF); if (!enabled) { struct hci_dev *d; read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { if (test_bit(HCI_UP, &d->flags)) enabled = true; } read_unlock(&hci_dev_list_lock); } led_trigger_event(bt_power_led_trigger, enabled ? LED_FULL : LED_OFF); } static int power_activate(struct led_classdev *led_cdev) { struct hci_basic_led_trigger *htrig; bool powered; htrig = to_hci_basic_led_trigger(led_cdev->trigger); powered = test_bit(HCI_UP, &htrig->hdev->flags); led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF); return 0; } static struct led_trigger *led_allocate_basic(struct hci_dev *hdev, int (*activate)(struct led_classdev *led_cdev), const char *name) { struct hci_basic_led_trigger *htrig; htrig = devm_kzalloc(&hdev->dev, sizeof(*htrig), GFP_KERNEL); if (!htrig) return NULL; htrig->hdev = hdev; htrig->led_trigger.activate = activate; htrig->led_trigger.name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s-%s", hdev->name, name); if (!htrig->led_trigger.name) goto err_alloc; if (devm_led_trigger_register(&hdev->dev, &htrig->led_trigger)) goto err_register; return &htrig->led_trigger; err_register: devm_kfree(&hdev->dev, (void *)htrig->led_trigger.name); err_alloc: devm_kfree(&hdev->dev, htrig); return NULL; } void hci_leds_init(struct hci_dev *hdev) { /* initialize power_led */ hdev->power_led = led_allocate_basic(hdev, power_activate, "power"); } void bt_leds_init(void) { led_trigger_register_simple("bluetooth-power", &bt_power_led_trigger); } void bt_leds_cleanup(void) { led_trigger_unregister_simple(bt_power_led_trigger); }
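/*
 * Consumer-side sketch (not part of the file above): an LED driver can tie a
 * physical LED to the triggers registered here by naming one of them as its
 * default trigger. "bluetooth-power" is the global trigger created by
 * bt_leds_init(); a per-controller trigger such as "hci0-power" is created by
 * hci_leds_init(). The LED name, the brightness callback and the registration
 * call in the trailing comment are hypothetical placeholders.
 */
#include <linux/leds.h>

static void my_bt_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	/* Drive the real GPIO/regulator here (omitted in this sketch). */
}

static struct led_classdev my_bt_led = {
	.name            = "myboard:blue:bt",
	.brightness_set  = my_bt_led_set,
	.max_brightness  = LED_FULL,
	.default_trigger = "bluetooth-power", /* follows overall BT power state */
};

/* Typically registered from a board or LED driver probe(), e.g.:
 *	devm_led_classdev_register(dev, &my_bt_led);
 */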
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2002 Richard Henderson * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
* Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> */ #define INCLUDE_VERMAGIC #include <linux/export.h> #include <linux/extable.h> #include <linux/moduleloader.h> #include <linux/module_signature.h> #include <linux/trace_events.h> #include <linux/init.h> #include <linux/kallsyms.h> #include <linux/buildid.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/kernel_read_file.h> #include <linux/kstrtox.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/elf.h> #include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/fcntl.h> #include <linux/rcupdate.h> #include <linux/capability.h> #include <linux/cpu.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/vermagic.h> #include <linux/notifier.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/string.h> #include <linux/mutex.h> #include <linux/rculist.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <linux/set_memory.h> #include <asm/mmu_context.h> #include <linux/license.h> #include <asm/sections.h> #include <linux/tracepoint.h> #include <linux/ftrace.h> #include <linux/livepatch.h> #include <linux/async.h> #include <linux/percpu.h> #include <linux/kmemleak.h> #include <linux/jump_label.h> #include <linux/pfn.h> #include <linux/bsearch.h> #include <linux/dynamic_debug.h> #include <linux/audit.h> #include <linux/cfi.h> #include <linux/codetag.h> #include <linux/debugfs.h> #include <linux/execmem.h> #include <uapi/linux/module.h> #include "internal.h" #define CREATE_TRACE_POINTS #include <trace/events/module.h> /* * Mutex protects: * 1) List of modules (also safely readable with preempt_disable), * 2) module_use links, * 3) mod_tree.addr_min/mod_tree.addr_max. * (delete and add uses RCU list operations). */ DEFINE_MUTEX(module_mutex); LIST_HEAD(modules); /* Work queue for freeing init sections in success case */ static void do_free_init(struct work_struct *w); static DECLARE_WORK(init_free_wq, do_free_init); static LLIST_HEAD(init_free_list); struct mod_tree_root mod_tree __cacheline_aligned = { .addr_min = -1UL, }; struct symsearch { const struct kernel_symbol *start, *stop; const s32 *crcs; enum mod_license license; }; /* * Bounds of module memory, for speeding up __module_address. * Protected by module_mutex. */ static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base, unsigned int size, struct mod_tree_root *tree) { unsigned long min = (unsigned long)base; unsigned long max = min + size; #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC if (mod_mem_type_is_core_data(type)) { if (min < tree->data_addr_min) tree->data_addr_min = min; if (max > tree->data_addr_max) tree->data_addr_max = max; return; } #endif if (min < tree->addr_min) tree->addr_min = min; if (max > tree->addr_max) tree->addr_max = max; } static void mod_update_bounds(struct module *mod) { for_each_mod_mem_type(type) { struct module_memory *mod_mem = &mod->mem[type]; if (mod_mem->size) __mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree); } } /* Block module loading/unloading? */ int modules_disabled; core_param(nomodule, modules_disabled, bint, 0); /* Waiting for a module to finish initializing? 
*/ static DECLARE_WAIT_QUEUE_HEAD(module_wq); static BLOCKING_NOTIFIER_HEAD(module_notify_list); int register_module_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&module_notify_list, nb); } EXPORT_SYMBOL(register_module_notifier); int unregister_module_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&module_notify_list, nb); } EXPORT_SYMBOL(unregister_module_notifier); /* * We require a truly strong try_module_get(): 0 means success. * Otherwise an error is returned due to ongoing or failed * initialization etc. */ static inline int strong_try_module_get(struct module *mod) { BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); if (mod && mod->state == MODULE_STATE_COMING) return -EBUSY; if (try_module_get(mod)) return 0; else return -ENOENT; } static inline void add_taint_module(struct module *mod, unsigned flag, enum lockdep_ok lockdep_ok) { add_taint(flag, lockdep_ok); set_bit(flag, &mod->taints); } /* * A thread that wants to hold a reference to a module only while it * is running can call this to safely exit. */ void __noreturn __module_put_and_kthread_exit(struct module *mod, long code) { module_put(mod); kthread_exit(code); } EXPORT_SYMBOL(__module_put_and_kthread_exit); /* Find a module section: 0 means not found. */ static unsigned int find_sec(const struct load_info *info, const char *name) { unsigned int i; for (i = 1; i < info->hdr->e_shnum; i++) { Elf_Shdr *shdr = &info->sechdrs[i]; /* Alloc bit cleared means "ignore it." */ if ((shdr->sh_flags & SHF_ALLOC) && strcmp(info->secstrings + shdr->sh_name, name) == 0) return i; } return 0; } /* Find a module section, or NULL. */ static void *section_addr(const struct load_info *info, const char *name) { /* Section 0 has sh_addr 0. */ return (void *)info->sechdrs[find_sec(info, name)].sh_addr; } /* Find a module section, or NULL. Fill in number of "objects" in section. */ static void *section_objs(const struct load_info *info, const char *name, size_t object_size, unsigned int *num) { unsigned int sec = find_sec(info, name); /* Section 0 has sh_addr 0 and sh_size 0. */ *num = info->sechdrs[sec].sh_size / object_size; return (void *)info->sechdrs[sec].sh_addr; } /* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */ static unsigned int find_any_sec(const struct load_info *info, const char *name) { unsigned int i; for (i = 1; i < info->hdr->e_shnum; i++) { Elf_Shdr *shdr = &info->sechdrs[i]; if (strcmp(info->secstrings + shdr->sh_name, name) == 0) return i; } return 0; } /* * Find a module section, or NULL. Fill in number of "objects" in section. * Ignores SHF_ALLOC flag. */ static __maybe_unused void *any_section_objs(const struct load_info *info, const char *name, size_t object_size, unsigned int *num) { unsigned int sec = find_any_sec(info, name); /* Section 0 has sh_addr 0 and sh_size 0. */ *num = info->sechdrs[sec].sh_size / object_size; return (void *)info->sechdrs[sec].sh_addr; } #ifndef CONFIG_MODVERSIONS #define symversion(base, idx) NULL #else #define symversion(base, idx) ((base != NULL) ? 
((base) + (idx)) : NULL) #endif static const char *kernel_symbol_name(const struct kernel_symbol *sym) { #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS return offset_to_ptr(&sym->name_offset); #else return sym->name; #endif } static const char *kernel_symbol_namespace(const struct kernel_symbol *sym) { #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS if (!sym->namespace_offset) return NULL; return offset_to_ptr(&sym->namespace_offset); #else return sym->namespace; #endif } int cmp_name(const void *name, const void *sym) { return strcmp(name, kernel_symbol_name(sym)); } static bool find_exported_symbol_in_section(const struct symsearch *syms, struct module *owner, struct find_symbol_arg *fsa) { struct kernel_symbol *sym; if (!fsa->gplok && syms->license == GPL_ONLY) return false; sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, sizeof(struct kernel_symbol), cmp_name); if (!sym) return false; fsa->owner = owner; fsa->crc = symversion(syms->crcs, sym - syms->start); fsa->sym = sym; fsa->license = syms->license; return true; } /* * Find an exported symbol and return it, along with, (optional) crc and * (optional) module which owns it. Needs preempt disabled or module_mutex. */ bool find_symbol(struct find_symbol_arg *fsa) { static const struct symsearch arr[] = { { __start___ksymtab, __stop___ksymtab, __start___kcrctab, NOT_GPL_ONLY }, { __start___ksymtab_gpl, __stop___ksymtab_gpl, __start___kcrctab_gpl, GPL_ONLY }, }; struct module *mod; unsigned int i; module_assert_mutex_or_preempt(); for (i = 0; i < ARRAY_SIZE(arr); i++) if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) return true; list_for_each_entry_rcu(mod, &modules, list, lockdep_is_held(&module_mutex)) { struct symsearch arr[] = { { mod->syms, mod->syms + mod->num_syms, mod->crcs, NOT_GPL_ONLY }, { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, mod->gpl_crcs, GPL_ONLY }, }; if (mod->state == MODULE_STATE_UNFORMED) continue; for (i = 0; i < ARRAY_SIZE(arr); i++) if (find_exported_symbol_in_section(&arr[i], mod, fsa)) return true; } pr_debug("Failed to find symbol %s\n", fsa->name); return false; } /* * Search for module by name: must hold module_mutex (or preempt disabled * for read-only access). 
*/ struct module *find_module_all(const char *name, size_t len, bool even_unformed) { struct module *mod; module_assert_mutex_or_preempt(); list_for_each_entry_rcu(mod, &modules, list, lockdep_is_held(&module_mutex)) { if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) continue; if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) return mod; } return NULL; } struct module *find_module(const char *name) { return find_module_all(name, strlen(name), false); } #ifdef CONFIG_SMP static inline void __percpu *mod_percpu(struct module *mod) { return mod->percpu; } static int percpu_modalloc(struct module *mod, struct load_info *info) { Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu]; unsigned long align = pcpusec->sh_addralign; if (!pcpusec->sh_size) return 0; if (align > PAGE_SIZE) { pr_warn("%s: per-cpu alignment %li > %li\n", mod->name, align, PAGE_SIZE); align = PAGE_SIZE; } mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align); if (!mod->percpu) { pr_warn("%s: Could not allocate %lu bytes percpu data\n", mod->name, (unsigned long)pcpusec->sh_size); return -ENOMEM; } mod->percpu_size = pcpusec->sh_size; return 0; } static void percpu_modfree(struct module *mod) { free_percpu(mod->percpu); } static unsigned int find_pcpusec(struct load_info *info) { return find_sec(info, ".data..percpu"); } static void percpu_modcopy(struct module *mod, const void *from, unsigned long size) { int cpu; for_each_possible_cpu(cpu) memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); } bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) { struct module *mod; unsigned int cpu; preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; if (!mod->percpu_size) continue; for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(mod->percpu, cpu); void *va = (void *)addr; if (va >= start && va < start + mod->percpu_size) { if (can_addr) { *can_addr = (unsigned long) (va - start); *can_addr += (unsigned long) per_cpu_ptr(mod->percpu, get_boot_cpu_id()); } preempt_enable(); return true; } } } preempt_enable(); return false; } /** * is_module_percpu_address() - test whether address is from module static percpu * @addr: address to test * * Test whether @addr belongs to module static percpu area. * * Return: %true if @addr is from module static percpu area */ bool is_module_percpu_address(unsigned long addr) { return __is_module_percpu_address(addr, NULL); } #else /* ... !CONFIG_SMP */ static inline void __percpu *mod_percpu(struct module *mod) { return NULL; } static int percpu_modalloc(struct module *mod, struct load_info *info) { /* UP modules shouldn't have this section: ENOMEM isn't quite right */ if (info->sechdrs[info->index.pcpu].sh_size != 0) return -ENOMEM; return 0; } static inline void percpu_modfree(struct module *mod) { } static unsigned int find_pcpusec(struct load_info *info) { return 0; } static inline void percpu_modcopy(struct module *mod, const void *from, unsigned long size) { /* pcpusec should be 0, and size of that section should be 0. 
*/ BUG_ON(size != 0); } bool is_module_percpu_address(unsigned long addr) { return false; } bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) { return false; } #endif /* CONFIG_SMP */ #define MODINFO_ATTR(field) \ static void setup_modinfo_##field(struct module *mod, const char *s) \ { \ mod->field = kstrdup(s, GFP_KERNEL); \ } \ static ssize_t show_modinfo_##field(struct module_attribute *mattr, \ struct module_kobject *mk, char *buffer) \ { \ return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \ } \ static int modinfo_##field##_exists(struct module *mod) \ { \ return mod->field != NULL; \ } \ static void free_modinfo_##field(struct module *mod) \ { \ kfree(mod->field); \ mod->field = NULL; \ } \ static struct module_attribute modinfo_##field = { \ .attr = { .name = __stringify(field), .mode = 0444 }, \ .show = show_modinfo_##field, \ .setup = setup_modinfo_##field, \ .test = modinfo_##field##_exists, \ .free = free_modinfo_##field, \ }; MODINFO_ATTR(version); MODINFO_ATTR(srcversion); static struct { char name[MODULE_NAME_LEN + 1]; char taints[MODULE_FLAGS_BUF_SIZE]; } last_unloaded_module; #ifdef CONFIG_MODULE_UNLOAD EXPORT_TRACEPOINT_SYMBOL(module_get); /* MODULE_REF_BASE is the base reference count by kmodule loader. */ #define MODULE_REF_BASE 1 /* Init the unload section of the module. */ static int module_unload_init(struct module *mod) { /* * Initialize reference counter to MODULE_REF_BASE. * refcnt == 0 means module is going. */ atomic_set(&mod->refcnt, MODULE_REF_BASE); INIT_LIST_HEAD(&mod->source_list); INIT_LIST_HEAD(&mod->target_list); /* Hold reference count during initialization. */ atomic_inc(&mod->refcnt); return 0; } /* Does a already use b? */ static int already_uses(struct module *a, struct module *b) { struct module_use *use; list_for_each_entry(use, &b->source_list, source_list) { if (use->source == a) return 1; } pr_debug("%s does not use %s!\n", a->name, b->name); return 0; } /* * Module a uses b * - we add 'a' as a "source", 'b' as a "target" of module use * - the module_use is added to the list of 'b' sources (so * 'b' can walk the list to see who sourced them), and of 'a' * targets (so 'a' can see what modules it targets). */ static int add_module_usage(struct module *a, struct module *b) { struct module_use *use; pr_debug("Allocating new usage for %s.\n", a->name); use = kmalloc(sizeof(*use), GFP_ATOMIC); if (!use) return -ENOMEM; use->source = a; use->target = b; list_add(&use->source_list, &b->source_list); list_add(&use->target_list, &a->target_list); return 0; } /* Module a uses b: caller needs module_mutex() */ static int ref_module(struct module *a, struct module *b) { int err; if (b == NULL || already_uses(a, b)) return 0; /* If module isn't available, we fail. */ err = strong_try_module_get(b); if (err) return err; err = add_module_usage(a, b); if (err) { module_put(b); return err; } return 0; } /* Clear the unload stuff of the module. 
*/ static void module_unload_free(struct module *mod) { struct module_use *use, *tmp; mutex_lock(&module_mutex); list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { struct module *i = use->target; pr_debug("%s unusing %s\n", mod->name, i->name); module_put(i); list_del(&use->source_list); list_del(&use->target_list); kfree(use); } mutex_unlock(&module_mutex); } #ifdef CONFIG_MODULE_FORCE_UNLOAD static inline int try_force_unload(unsigned int flags) { int ret = (flags & O_TRUNC); if (ret) add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE); return ret; } #else static inline int try_force_unload(unsigned int flags) { return 0; } #endif /* CONFIG_MODULE_FORCE_UNLOAD */ /* Try to release refcount of module, 0 means success. */ static int try_release_module_ref(struct module *mod) { int ret; /* Try to decrement refcnt which we set at loading */ ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt); BUG_ON(ret < 0); if (ret) /* Someone can put this right now, recover with checking */ ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0); return ret; } static int try_stop_module(struct module *mod, int flags, int *forced) { /* If it's not unused, quit unless we're forcing. */ if (try_release_module_ref(mod) != 0) { *forced = try_force_unload(flags); if (!(*forced)) return -EWOULDBLOCK; } /* Mark it as dying. */ mod->state = MODULE_STATE_GOING; return 0; } /** * module_refcount() - return the refcount or -1 if unloading * @mod: the module we're checking * * Return: * -1 if the module is in the process of unloading * otherwise the number of references in the kernel to the module */ int module_refcount(struct module *mod) { return atomic_read(&mod->refcnt) - MODULE_REF_BASE; } EXPORT_SYMBOL(module_refcount); /* This exists whether we can unload or not */ static void free_module(struct module *mod); SYSCALL_DEFINE2(delete_module, const char __user *, name_user, unsigned int, flags) { struct module *mod; char name[MODULE_NAME_LEN]; char buf[MODULE_FLAGS_BUF_SIZE]; int ret, forced = 0; if (!capable(CAP_SYS_MODULE) || modules_disabled) return -EPERM; if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) return -EFAULT; name[MODULE_NAME_LEN-1] = '\0'; audit_log_kern_module(name); if (mutex_lock_interruptible(&module_mutex) != 0) return -EINTR; mod = find_module(name); if (!mod) { ret = -ENOENT; goto out; } if (!list_empty(&mod->source_list)) { /* Other modules depend on us: get rid of them first. */ ret = -EWOULDBLOCK; goto out; } /* Doing init or already dying? */ if (mod->state != MODULE_STATE_LIVE) { /* FIXME: if (force), slam module count damn the torpedoes */ pr_debug("%s already dying\n", mod->name); ret = -EBUSY; goto out; } /* If it has an init func, it must have an exit func to unload */ if (mod->init && !mod->exit) { forced = try_force_unload(flags); if (!forced) { /* This module can't be removed */ ret = -EBUSY; goto out; } } ret = try_stop_module(mod, flags, &forced); if (ret != 0) goto out; mutex_unlock(&module_mutex); /* Final destruction now no one is using it. 
*/ if (mod->exit != NULL) mod->exit(); blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_GOING, mod); klp_module_going(mod); ftrace_release_mod(mod); async_synchronize_full(); /* Store the name and taints of the last unloaded module for diagnostic purposes */ strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name)); strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints)); free_module(mod); /* someone could wait for the module in add_unformed_module() */ wake_up_all(&module_wq); return 0; out: mutex_unlock(&module_mutex); return ret; } void __symbol_put(const char *symbol) { struct find_symbol_arg fsa = { .name = symbol, .gplok = true, }; preempt_disable(); BUG_ON(!find_symbol(&fsa)); module_put(fsa.owner); preempt_enable(); } EXPORT_SYMBOL(__symbol_put); /* Note this assumes addr is a function, which it currently always is. */ void symbol_put_addr(void *addr) { struct module *modaddr; unsigned long a = (unsigned long)dereference_function_descriptor(addr); if (core_kernel_text(a)) return; /* * Even though we hold a reference on the module; we still need to * disable preemption in order to safely traverse the data structure. */ preempt_disable(); modaddr = __module_text_address(a); BUG_ON(!modaddr); module_put(modaddr); preempt_enable(); } EXPORT_SYMBOL_GPL(symbol_put_addr); static ssize_t show_refcnt(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { return sprintf(buffer, "%i\n", module_refcount(mk->mod)); } static struct module_attribute modinfo_refcnt = __ATTR(refcnt, 0444, show_refcnt, NULL); void __module_get(struct module *module) { if (module) { atomic_inc(&module->refcnt); trace_module_get(module, _RET_IP_); } } EXPORT_SYMBOL(__module_get); bool try_module_get(struct module *module) { bool ret = true; if (module) { /* Note: here, we can fail to get a reference */ if (likely(module_is_live(module) && atomic_inc_not_zero(&module->refcnt) != 0)) trace_module_get(module, _RET_IP_); else ret = false; } return ret; } EXPORT_SYMBOL(try_module_get); void module_put(struct module *module) { int ret; if (module) { ret = atomic_dec_if_positive(&module->refcnt); WARN_ON(ret < 0); /* Failed to put refcount */ trace_module_put(module, _RET_IP_); } } EXPORT_SYMBOL(module_put); #else /* !CONFIG_MODULE_UNLOAD */ static inline void module_unload_free(struct module *mod) { } static int ref_module(struct module *a, struct module *b) { return strong_try_module_get(b); } static inline int module_unload_init(struct module *mod) { return 0; } #endif /* CONFIG_MODULE_UNLOAD */ size_t module_flags_taint(unsigned long taints, char *buf) { size_t l = 0; int i; for (i = 0; i < TAINT_FLAGS_COUNT; i++) { if (taint_flags[i].module && test_bit(i, &taints)) buf[l++] = taint_flags[i].c_true; } return l; } static ssize_t show_initstate(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { const char *state = "unknown"; switch (mk->mod->state) { case MODULE_STATE_LIVE: state = "live"; break; case MODULE_STATE_COMING: state = "coming"; break; case MODULE_STATE_GOING: state = "going"; break; default: BUG(); } return sprintf(buffer, "%s\n", state); } static struct module_attribute modinfo_initstate = __ATTR(initstate, 0444, show_initstate, NULL); static ssize_t store_uevent(struct module_attribute *mattr, struct module_kobject *mk, const char *buffer, size_t count) { int rc; rc = kobject_synth_uevent(&mk->kobj, buffer, count); return rc ? 
rc : count; } struct module_attribute module_uevent = __ATTR(uevent, 0200, NULL, store_uevent); static ssize_t show_coresize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { unsigned int size = mk->mod->mem[MOD_TEXT].size; if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) { for_class_mod_mem_type(type, core_data) size += mk->mod->mem[type].size; } return sprintf(buffer, "%u\n", size); } static struct module_attribute modinfo_coresize = __ATTR(coresize, 0444, show_coresize, NULL); #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC static ssize_t show_datasize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { unsigned int size = 0; for_class_mod_mem_type(type, core_data) size += mk->mod->mem[type].size; return sprintf(buffer, "%u\n", size); } static struct module_attribute modinfo_datasize = __ATTR(datasize, 0444, show_datasize, NULL); #endif static ssize_t show_initsize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { unsigned int size = 0; for_class_mod_mem_type(type, init) size += mk->mod->mem[type].size; return sprintf(buffer, "%u\n", size); } static struct module_attribute modinfo_initsize = __ATTR(initsize, 0444, show_initsize, NULL); static ssize_t show_taint(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { size_t l; l = module_flags_taint(mk->mod->taints, buffer); buffer[l++] = '\n'; return l; } static struct module_attribute modinfo_taint = __ATTR(taint, 0444, show_taint, NULL); struct module_attribute *modinfo_attrs[] = { &module_uevent, &modinfo_version, &modinfo_srcversion, &modinfo_initstate, &modinfo_coresize, #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC &modinfo_datasize, #endif &modinfo_initsize, &modinfo_taint, #ifdef CONFIG_MODULE_UNLOAD &modinfo_refcnt, #endif NULL, }; size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs); static const char vermagic[] = VERMAGIC_STRING; int try_to_force_load(struct module *mod, const char *reason) { #ifdef CONFIG_MODULE_FORCE_LOAD if (!test_taint(TAINT_FORCED_MODULE)) pr_warn("%s: %s: kernel tainted.\n", mod->name, reason); add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE); return 0; #else return -ENOEXEC; #endif } /* Parse tag=value strings from .modinfo section */ char *module_next_tag_pair(char *string, unsigned long *secsize) { /* Skip non-zero chars */ while (string[0]) { string++; if ((*secsize)-- <= 1) return NULL; } /* Skip any zero padding. */ while (!string[0]) { string++; if ((*secsize)-- <= 1) return NULL; } return string; } static char *get_next_modinfo(const struct load_info *info, const char *tag, char *prev) { char *p; unsigned int taglen = strlen(tag); Elf_Shdr *infosec = &info->sechdrs[info->index.info]; unsigned long size = infosec->sh_size; /* * get_modinfo() calls made before rewrite_section_headers() * must use sh_offset, as sh_addr isn't set! 
*/ char *modinfo = (char *)info->hdr + infosec->sh_offset; if (prev) { size -= prev - modinfo; modinfo = module_next_tag_pair(prev, &size); } for (p = modinfo; p; p = module_next_tag_pair(p, &size)) { if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') return p + taglen + 1; } return NULL; } static char *get_modinfo(const struct load_info *info, const char *tag) { return get_next_modinfo(info, tag, NULL); } static int verify_namespace_is_imported(const struct load_info *info, const struct kernel_symbol *sym, struct module *mod) { const char *namespace; char *imported_namespace; namespace = kernel_symbol_namespace(sym); if (namespace && namespace[0]) { for_each_modinfo_entry(imported_namespace, info, "import_ns") { if (strcmp(namespace, imported_namespace) == 0) return 0; } #ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS pr_warn( #else pr_err( #endif "%s: module uses symbol (%s) from namespace %s, but does not import it.\n", mod->name, kernel_symbol_name(sym), namespace); #ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS return -EINVAL; #endif } return 0; } static bool inherit_taint(struct module *mod, struct module *owner, const char *name) { if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) return true; if (mod->using_gplonly_symbols) { pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n", mod->name, name, owner->name); return false; } if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n", mod->name, name, owner->name); set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); } return true; } /* Resolve a symbol for this module. I.e. if we find one, record usage. */ static const struct kernel_symbol *resolve_symbol(struct module *mod, const struct load_info *info, const char *name, char ownername[]) { struct find_symbol_arg fsa = { .name = name, .gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), .warn = true, }; int err; /* * The module_mutex should not be a heavily contended lock; * if we get the occasional sleep here, we'll go an extra iteration * in the wait_event_interruptible(), which is harmless. */ sched_annotate_sleep(); mutex_lock(&module_mutex); if (!find_symbol(&fsa)) goto unlock; if (fsa.license == GPL_ONLY) mod->using_gplonly_symbols = true; if (!inherit_taint(mod, fsa.owner, name)) { fsa.sym = NULL; goto getname; } if (!check_version(info, name, mod, fsa.crc)) { fsa.sym = ERR_PTR(-EINVAL); goto getname; } err = verify_namespace_is_imported(info, fsa.sym, mod); if (err) { fsa.sym = ERR_PTR(err); goto getname; } err = ref_module(mod, fsa.owner); if (err) { fsa.sym = ERR_PTR(err); goto getname; } getname: /* We must make copy under the lock if we failed to get ref. 
*/ strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); unlock: mutex_unlock(&module_mutex); return fsa.sym; } static const struct kernel_symbol * resolve_symbol_wait(struct module *mod, const struct load_info *info, const char *name) { const struct kernel_symbol *ksym; char owner[MODULE_NAME_LEN]; if (wait_event_interruptible_timeout(module_wq, !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) || PTR_ERR(ksym) != -EBUSY, 30 * HZ) <= 0) { pr_warn("%s: gave up waiting for init of module %s.\n", mod->name, owner); } return ksym; } void __weak module_arch_cleanup(struct module *mod) { } void __weak module_arch_freeing_init(struct module *mod) { } static int module_memory_alloc(struct module *mod, enum mod_mem_type type) { unsigned int size = PAGE_ALIGN(mod->mem[type].size); enum execmem_type execmem_type; void *ptr; mod->mem[type].size = size; if (mod_mem_type_is_data(type)) execmem_type = EXECMEM_MODULE_DATA; else execmem_type = EXECMEM_MODULE_TEXT; ptr = execmem_alloc(execmem_type, size); if (!ptr) return -ENOMEM; /* * The pointer to these blocks of memory are stored on the module * structure and we keep that around so long as the module is * around. We only free that memory when we unload the module. * Just mark them as not being a leak then. The .init* ELF * sections *do* get freed after boot so we *could* treat them * slightly differently with kmemleak_ignore() and only grey * them out as they work as typical memory allocations which * *do* eventually get freed, but let's just keep things simple * and avoid *any* false positives. */ kmemleak_not_leak(ptr); memset(ptr, 0, size); mod->mem[type].base = ptr; return 0; } static void module_memory_free(struct module *mod, enum mod_mem_type type, bool unload_codetags) { void *ptr = mod->mem[type].base; if (!unload_codetags && mod_mem_type_is_core_data(type)) return; execmem_free(ptr); } static void free_mod_mem(struct module *mod, bool unload_codetags) { for_each_mod_mem_type(type) { struct module_memory *mod_mem = &mod->mem[type]; if (type == MOD_DATA) continue; /* Free lock-classes; relies on the preceding sync_rcu(). */ lockdep_free_key_range(mod_mem->base, mod_mem->size); if (mod_mem->size) module_memory_free(mod, type, unload_codetags); } /* MOD_DATA hosts mod, so free it at last */ lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size); module_memory_free(mod, MOD_DATA, unload_codetags); } /* Free a module, remove from lists, etc. */ static void free_module(struct module *mod) { bool unload_codetags; trace_module_free(mod); unload_codetags = codetag_unload_module(mod); if (!unload_codetags) pr_warn("%s: memory allocation(s) from the module still alive, cannot unload cleanly\n", mod->name); mod_sysfs_teardown(mod); /* * We leave it in list to prevent duplicate loads, but make sure * that noone uses it while it's being deconstructed. */ mutex_lock(&module_mutex); mod->state = MODULE_STATE_UNFORMED; mutex_unlock(&module_mutex); /* Arch-specific cleanup. */ module_arch_cleanup(mod); /* Module unload stuff */ module_unload_free(mod); /* Free any allocated parameters. */ destroy_params(mod->kp, mod->num_kp); if (is_livepatch_module(mod)) free_module_elf(mod); /* Now we can delete it from the lists */ mutex_lock(&module_mutex); /* Unlink carefully: kallsyms could be walking list. */ list_del_rcu(&mod->list); mod_tree_remove(mod); /* Remove this module from bug list, this uses list_del_rcu */ module_bug_cleanup(mod); /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. 
*/ synchronize_rcu(); if (try_add_tainted_module(mod)) pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", mod->name); mutex_unlock(&module_mutex); /* This may be empty, but that's OK */ module_arch_freeing_init(mod); kfree(mod->args); percpu_modfree(mod); free_mod_mem(mod, unload_codetags); } void *__symbol_get(const char *symbol) { struct find_symbol_arg fsa = { .name = symbol, .gplok = true, .warn = true, }; preempt_disable(); if (!find_symbol(&fsa)) goto fail; if (fsa.license != GPL_ONLY) { pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", symbol); goto fail; } if (strong_try_module_get(fsa.owner)) goto fail; preempt_enable(); return (void *)kernel_symbol_value(fsa.sym); fail: preempt_enable(); return NULL; } EXPORT_SYMBOL_GPL(__symbol_get); /* * Ensure that an exported symbol [global namespace] does not already exist * in the kernel or in some other module's exported symbol table. * * You must hold the module_mutex. */ static int verify_exported_symbols(struct module *mod) { unsigned int i; const struct kernel_symbol *s; struct { const struct kernel_symbol *sym; unsigned int num; } arr[] = { { mod->syms, mod->num_syms }, { mod->gpl_syms, mod->num_gpl_syms }, }; for (i = 0; i < ARRAY_SIZE(arr); i++) { for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { struct find_symbol_arg fsa = { .name = kernel_symbol_name(s), .gplok = true, }; if (find_symbol(&fsa)) { pr_err("%s: exports duplicate symbol %s" " (owned by %s)\n", mod->name, kernel_symbol_name(s), module_name(fsa.owner)); return -ENOEXEC; } } } return 0; } static bool ignore_undef_symbol(Elf_Half emachine, const char *name) { /* * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. * i386 has a similar problem but may not deserve a fix. * * If we ever have to ignore many symbols, consider refactoring the code to * only warn if referenced by a relocation. */ if (emachine == EM_386 || emachine == EM_X86_64) return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); return false; } /* Change all symbols so that st_value encodes the pointer directly. */ static int simplify_symbols(struct module *mod, const struct load_info *info) { Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; Elf_Sym *sym = (void *)symsec->sh_addr; unsigned long secbase; unsigned int i; int ret = 0; const struct kernel_symbol *ksym; for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { const char *name = info->strtab + sym[i].st_name; switch (sym[i].st_shndx) { case SHN_COMMON: /* Ignore common symbols */ if (!strncmp(name, "__gnu_lto", 9)) break; /* * We compiled with -fno-common. These are not * supposed to happen. */ pr_debug("Common symbol: %s\n", name); pr_warn("%s: please compile with -fno-common\n", mod->name); ret = -ENOEXEC; break; case SHN_ABS: /* Don't need to do anything */ pr_debug("Absolute symbol: 0x%08lx %s\n", (long)sym[i].st_value, name); break; case SHN_LIVEPATCH: /* Livepatch symbols are resolved by livepatch */ break; case SHN_UNDEF: ksym = resolve_symbol_wait(mod, info, name); /* Ok if resolved. */ if (ksym && !IS_ERR(ksym)) { sym[i].st_value = kernel_symbol_value(ksym); break; } /* Ok if weak or ignored. */ if (!ksym && (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || ignore_undef_symbol(info->hdr->e_machine, name))) break; ret = PTR_ERR(ksym) ?: -ENOENT; pr_warn("%s: Unknown symbol %s (err %d)\n", mod->name, name, ret); break; default: /* Divert to percpu allocation if a percpu var. 
*/ if (sym[i].st_shndx == info->index.pcpu) secbase = (unsigned long)mod_percpu(mod); else secbase = info->sechdrs[sym[i].st_shndx].sh_addr; sym[i].st_value += secbase; break; } } return ret; } static int apply_relocations(struct module *mod, const struct load_info *info) { unsigned int i; int err = 0; /* Now do relocations. */ for (i = 1; i < info->hdr->e_shnum; i++) { unsigned int infosec = info->sechdrs[i].sh_info; /* Not a valid relocation section? */ if (infosec >= info->hdr->e_shnum) continue; /* Don't bother with non-allocated sections */ if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) continue; if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) err = klp_apply_section_relocs(mod, info->sechdrs, info->secstrings, info->strtab, info->index.sym, i, NULL); else if (info->sechdrs[i].sh_type == SHT_REL) err = apply_relocate(info->sechdrs, info->strtab, info->index.sym, i, mod); else if (info->sechdrs[i].sh_type == SHT_RELA) err = apply_relocate_add(info->sechdrs, info->strtab, info->index.sym, i, mod); if (err < 0) break; } return err; } /* Additional bytes needed by arch in front of individual sections */ unsigned int __weak arch_mod_section_prepend(struct module *mod, unsigned int section) { /* default implementation just returns zero */ return 0; } long module_get_offset_and_type(struct module *mod, enum mod_mem_type type, Elf_Shdr *sechdr, unsigned int section) { long offset; long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT; mod->mem[type].size += arch_mod_section_prepend(mod, section); offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1); mod->mem[type].size = offset + sechdr->sh_size; WARN_ON_ONCE(offset & mask); return offset | mask; } bool module_init_layout_section(const char *sname) { #ifndef CONFIG_MODULE_UNLOAD if (module_exit_section(sname)) return true; #endif return module_init_section(sname); } static void __layout_sections(struct module *mod, struct load_info *info, bool is_init) { unsigned int m, i; static const unsigned long masks[][2] = { /* * NOTE: all executable code must be the first section * in this array; otherwise modify the text_size * finder in the two loops below */ { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, { ARCH_SHF_SMALL | SHF_ALLOC, 0 } }; static const int core_m_to_mem_type[] = { MOD_TEXT, MOD_RODATA, MOD_RO_AFTER_INIT, MOD_DATA, MOD_DATA, }; static const int init_m_to_mem_type[] = { MOD_INIT_TEXT, MOD_INIT_RODATA, MOD_INVALID, MOD_INIT_DATA, MOD_INIT_DATA, }; for (m = 0; m < ARRAY_SIZE(masks); ++m) { enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m]; for (i = 0; i < info->hdr->e_shnum; ++i) { Elf_Shdr *s = &info->sechdrs[i]; const char *sname = info->secstrings + s->sh_name; if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL || is_init != module_init_layout_section(sname)) continue; if (WARN_ON_ONCE(type == MOD_INVALID)) continue; s->sh_entsize = module_get_offset_and_type(mod, type, s, i); pr_debug("\t%s\n", sname); } } } /* * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld * might -- code, read-only data, read-write data, small data. Tally * sizes, and place the offsets into sh_entsize fields: high bit means it * belongs in init. 
*/ static void layout_sections(struct module *mod, struct load_info *info) { unsigned int i; for (i = 0; i < info->hdr->e_shnum; i++) info->sechdrs[i].sh_entsize = ~0UL; pr_debug("Core section allocation order for %s:\n", mod->name); __layout_sections(mod, info, false); pr_debug("Init section allocation order for %s:\n", mod->name); __layout_sections(mod, info, true); } static void module_license_taint_check(struct module *mod, const char *license) { if (!license) license = "unspecified"; if (!license_is_gpl_compatible(license)) { if (!test_taint(TAINT_PROPRIETARY_MODULE)) pr_warn("%s: module license '%s' taints kernel.\n", mod->name, license); add_taint_module(mod, TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); } } static void setup_modinfo(struct module *mod, struct load_info *info) { struct module_attribute *attr; int i; for (i = 0; (attr = modinfo_attrs[i]); i++) { if (attr->setup) attr->setup(mod, get_modinfo(info, attr->attr.name)); } } static void free_modinfo(struct module *mod) { struct module_attribute *attr; int i; for (i = 0; (attr = modinfo_attrs[i]); i++) { if (attr->free) attr->free(mod); } } bool __weak module_init_section(const char *name) { return strstarts(name, ".init"); } bool __weak module_exit_section(const char *name) { return strstarts(name, ".exit"); } static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) { #if defined(CONFIG_64BIT) unsigned long long secend; #else unsigned long secend; #endif /* * Check for both overflow and offset/size being * too large. */ secend = shdr->sh_offset + shdr->sh_size; if (secend < shdr->sh_offset || secend > info->len) return -ENOEXEC; return 0; } /* * Check userspace passed ELF module against our expectations, and cache * useful variables for further processing as we go. * * This does basic validity checks against section offsets and sizes, the * section name string table, and the indices used for it (sh_name). * * As a last step, since we're already checking the ELF sections we cache * useful variables which will be used later for our convenience: * * o pointers to section headers * o cache the modinfo symbol section * o cache the string symbol section * o cache the module section * * As a last step we set info->mod to the temporary copy of the module in * info->hdr. The final one will be allocated in move_module(). Any * modifications we make to our copy of the module will be carried over * to the final minted module. */ static int elf_validity_cache_copy(struct load_info *info, int flags) { unsigned int i; Elf_Shdr *shdr, *strhdr; int err; unsigned int num_mod_secs = 0, mod_idx; unsigned int num_info_secs = 0, info_idx; unsigned int num_sym_secs = 0, sym_idx; if (info->len < sizeof(*(info->hdr))) { pr_err("Invalid ELF header len %lu\n", info->len); goto no_exec; } if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { pr_err("Invalid ELF header magic: != %s\n", ELFMAG); goto no_exec; } if (info->hdr->e_type != ET_REL) { pr_err("Invalid ELF header type: %u != %u\n", info->hdr->e_type, ET_REL); goto no_exec; } if (!elf_check_arch(info->hdr)) { pr_err("Invalid architecture in ELF header: %u\n", info->hdr->e_machine); goto no_exec; } if (!module_elf_check_arch(info->hdr)) { pr_err("Invalid module architecture in ELF header: %u\n", info->hdr->e_machine); goto no_exec; } if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { pr_err("Invalid ELF section header size\n"); goto no_exec; } /* * e_shnum is 16 bits, and sizeof(Elf_Shdr) is * known and small. 
So e_shnum * sizeof(Elf_Shdr) * will not overflow unsigned long on any platform. */ if (info->hdr->e_shoff >= info->len || (info->hdr->e_shnum * sizeof(Elf_Shdr) > info->len - info->hdr->e_shoff)) { pr_err("Invalid ELF section header overflow\n"); goto no_exec; } info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; /* * Verify if the section name table index is valid. */ if (info->hdr->e_shstrndx == SHN_UNDEF || info->hdr->e_shstrndx >= info->hdr->e_shnum) { pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", info->hdr->e_shstrndx, info->hdr->e_shstrndx, info->hdr->e_shnum); goto no_exec; } strhdr = &info->sechdrs[info->hdr->e_shstrndx]; err = validate_section_offset(info, strhdr); if (err < 0) { pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type); return err; } /* * The section name table must be NUL-terminated, as required * by the spec. This makes strcmp and pr_* calls that access * strings in the section safe. */ info->secstrings = (void *)info->hdr + strhdr->sh_offset; if (strhdr->sh_size == 0) { pr_err("empty section name table\n"); goto no_exec; } if (info->secstrings[strhdr->sh_size - 1] != '\0') { pr_err("ELF Spec violation: section name table isn't null terminated\n"); goto no_exec; } /* * The code assumes that section 0 has a length of zero and * an addr of zero, so check for it. */ if (info->sechdrs[0].sh_type != SHT_NULL || info->sechdrs[0].sh_size != 0 || info->sechdrs[0].sh_addr != 0) { pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", info->sechdrs[0].sh_type); goto no_exec; } for (i = 1; i < info->hdr->e_shnum; i++) { shdr = &info->sechdrs[i]; switch (shdr->sh_type) { case SHT_NULL: case SHT_NOBITS: continue; case SHT_SYMTAB: if (shdr->sh_link == SHN_UNDEF || shdr->sh_link >= info->hdr->e_shnum) { pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", shdr->sh_link, shdr->sh_link, info->hdr->e_shnum); goto no_exec; } num_sym_secs++; sym_idx = i; fallthrough; default: err = validate_section_offset(info, shdr); if (err < 0) { pr_err("Invalid ELF section in module (section %u type %u)\n", i, shdr->sh_type); return err; } if (strcmp(info->secstrings + shdr->sh_name, ".gnu.linkonce.this_module") == 0) { num_mod_secs++; mod_idx = i; } else if (strcmp(info->secstrings + shdr->sh_name, ".modinfo") == 0) { num_info_secs++; info_idx = i; } if (shdr->sh_flags & SHF_ALLOC) { if (shdr->sh_name >= strhdr->sh_size) { pr_err("Invalid ELF section name in module (section %u type %u)\n", i, shdr->sh_type); return -ENOEXEC; } } break; } } if (num_info_secs > 1) { pr_err("Only one .modinfo section must exist.\n"); goto no_exec; } else if (num_info_secs == 1) { /* Try to find a name early so we can log errors with a module name */ info->index.info = info_idx; info->name = get_modinfo(info, "name"); } if (num_sym_secs != 1) { pr_warn("%s: module has no symbols (stripped?)\n", info->name ?: "(missing .modinfo section or name field)"); goto no_exec; } /* Sets internal symbols and strings. */ info->index.sym = sym_idx; shdr = &info->sechdrs[sym_idx]; info->index.str = shdr->sh_link; info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset; /* * The ".gnu.linkonce.this_module" ELF section is special. It is * what modpost uses to refer to __this_module and let's use rely * on THIS_MODULE to point to &__this_module properly. The kernel's * modpost declares it on each modules's *.mod.c file. If the struct * module of the kernel changes a full kernel rebuild is required. 
* * We have a few expectaions for this special section, the following * code validates all this for us: * * o Only one section must exist * o We expect the kernel to always have to allocate it: SHF_ALLOC * o The section size must match the kernel's run time's struct module * size */ if (num_mod_secs != 1) { pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n", info->name ?: "(missing .modinfo section or name field)"); goto no_exec; } shdr = &info->sechdrs[mod_idx]; /* * This is already implied on the switch above, however let's be * pedantic about it. */ if (shdr->sh_type == SHT_NOBITS) { pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n", info->name ?: "(missing .modinfo section or name field)"); goto no_exec; } if (!(shdr->sh_flags & SHF_ALLOC)) { pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n", info->name ?: "(missing .modinfo section or name field)"); goto no_exec; } if (shdr->sh_size != sizeof(struct module)) { pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n", info->name ?: "(missing .modinfo section or name field)"); goto no_exec; } info->index.mod = mod_idx; /* This is temporary: point mod into copy of data. */ info->mod = (void *)info->hdr + shdr->sh_offset; /* * If we didn't load the .modinfo 'name' field earlier, fall back to * on-disk struct mod 'name' field. */ if (!info->name) info->name = info->mod->name; if (flags & MODULE_INIT_IGNORE_MODVERSIONS) info->index.vers = 0; /* Pretend no __versions section! */ else info->index.vers = find_sec(info, "__versions"); info->index.pcpu = find_pcpusec(info); return 0; no_exec: return -ENOEXEC; } #define COPY_CHUNK_SIZE (16*PAGE_SIZE) static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) { do { unsigned long n = min(len, COPY_CHUNK_SIZE); if (copy_from_user(dst, usrc, n) != 0) return -EFAULT; cond_resched(); dst += n; usrc += n; len -= n; } while (len); return 0; } static int check_modinfo_livepatch(struct module *mod, struct load_info *info) { if (!get_modinfo(info, "livepatch")) /* Nothing more to do */ return 0; if (set_livepatch_module(mod)) return 0; pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", mod->name); return -ENOEXEC; } static void check_modinfo_retpoline(struct module *mod, struct load_info *info) { if (retpoline_module_ok(get_modinfo(info, "retpoline"))) return; pr_warn("%s: loading module not compiled with retpoline compiler.\n", mod->name); } /* Sets info->hdr and info->len. */ static int copy_module_from_user(const void __user *umod, unsigned long len, struct load_info *info) { int err; info->len = len; if (info->len < sizeof(*(info->hdr))) return -ENOEXEC; err = security_kernel_load_data(LOADING_MODULE, true); if (err) return err; /* Suck in entire file: we'll want most of it. 
*/ info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); if (!info->hdr) return -ENOMEM; if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { err = -EFAULT; goto out; } err = security_kernel_post_load_data((char *)info->hdr, info->len, LOADING_MODULE, "init_module"); out: if (err) vfree(info->hdr); return err; } static void free_copy(struct load_info *info, int flags) { if (flags & MODULE_INIT_COMPRESSED_FILE) module_decompress_cleanup(info); else vfree(info->hdr); } static int rewrite_section_headers(struct load_info *info, int flags) { unsigned int i; /* This should always be true, but let's be sure. */ info->sechdrs[0].sh_addr = 0; for (i = 1; i < info->hdr->e_shnum; i++) { Elf_Shdr *shdr = &info->sechdrs[i]; /* * Mark all sections sh_addr with their address in the * temporary image. */ shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; } /* Track but don't keep modinfo and version sections. */ info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; return 0; } /* * These calls taint the kernel depending certain module circumstances */ static void module_augment_kernel_taints(struct module *mod, struct load_info *info) { int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); if (!get_modinfo(info, "intree")) { if (!test_taint(TAINT_OOT_MODULE)) pr_warn("%s: loading out-of-tree module taints kernel.\n", mod->name); add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); } check_modinfo_retpoline(mod, info); if (get_modinfo(info, "staging")) { add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); pr_warn("%s: module is from the staging directory, the quality " "is unknown, you have been warned.\n", mod->name); } if (is_livepatch_module(mod)) { add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", mod->name); } module_license_taint_check(mod, get_modinfo(info, "license")); if (get_modinfo(info, "test")) { if (!test_taint(TAINT_TEST)) pr_warn("%s: loading test module taints kernel.\n", mod->name); add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK); } #ifdef CONFIG_MODULE_SIG mod->sig_ok = info->sig_ok; if (!mod->sig_ok) { pr_notice_once("%s: module verification failed: signature " "and/or required key missing - tainting " "kernel\n", mod->name); add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); } #endif /* * ndiswrapper is under GPL by itself, but loads proprietary modules. * Don't use add_taint_module(), as it would prevent ndiswrapper from * using GPL-only symbols it needs. */ if (strcmp(mod->name, "ndiswrapper") == 0) add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); /* driverloader was caught wrongly pretending to be under GPL */ if (strcmp(mod->name, "driverloader") == 0) add_taint_module(mod, TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); /* lve claims to be GPL but upstream won't provide source */ if (strcmp(mod->name, "lve") == 0) add_taint_module(mod, TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) pr_warn("%s: module license taints kernel.\n", mod->name); } static int check_modinfo(struct module *mod, struct load_info *info, int flags) { const char *modmagic = get_modinfo(info, "vermagic"); int err; if (flags & MODULE_INIT_IGNORE_VERMAGIC) modmagic = NULL; /* This is allowed: modprobe --force will invalidate it. 
*/ if (!modmagic) { err = try_to_force_load(mod, "bad vermagic"); if (err) return err; } else if (!same_magic(modmagic, vermagic, info->index.vers)) { pr_err("%s: version magic '%s' should be '%s'\n", info->name, modmagic, vermagic); return -ENOEXEC; } err = check_modinfo_livepatch(mod, info); if (err) return err; return 0; } static int find_module_sections(struct module *mod, struct load_info *info) { mod->kp = section_objs(info, "__param", sizeof(*mod->kp), &mod->num_kp); mod->syms = section_objs(info, "__ksymtab", sizeof(*mod->syms), &mod->num_syms); mod->crcs = section_addr(info, "__kcrctab"); mod->gpl_syms = section_objs(info, "__ksymtab_gpl", sizeof(*mod->gpl_syms), &mod->num_gpl_syms); mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); #ifdef CONFIG_CONSTRUCTORS mod->ctors = section_objs(info, ".ctors", sizeof(*mod->ctors), &mod->num_ctors); if (!mod->ctors) mod->ctors = section_objs(info, ".init_array", sizeof(*mod->ctors), &mod->num_ctors); else if (find_sec(info, ".init_array")) { /* * This shouldn't happen with same compiler and binutils * building all parts of the module. */ pr_warn("%s: has both .ctors and .init_array.\n", mod->name); return -EINVAL; } #endif mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, &mod->noinstr_text_size); #ifdef CONFIG_TRACEPOINTS mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", sizeof(*mod->tracepoints_ptrs), &mod->num_tracepoints); #endif #ifdef CONFIG_TREE_SRCU mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", sizeof(*mod->srcu_struct_ptrs), &mod->num_srcu_structs); #endif #ifdef CONFIG_BPF_EVENTS mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", sizeof(*mod->bpf_raw_events), &mod->num_bpf_raw_events); #endif #ifdef CONFIG_DEBUG_INFO_BTF_MODULES mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); mod->btf_base_data = any_section_objs(info, ".BTF.base", 1, &mod->btf_base_data_size); #endif #ifdef CONFIG_JUMP_LABEL mod->jump_entries = section_objs(info, "__jump_table", sizeof(*mod->jump_entries), &mod->num_jump_entries); #endif #ifdef CONFIG_EVENT_TRACING mod->trace_events = section_objs(info, "_ftrace_events", sizeof(*mod->trace_events), &mod->num_trace_events); mod->trace_evals = section_objs(info, "_ftrace_eval_map", sizeof(*mod->trace_evals), &mod->num_trace_evals); #endif #ifdef CONFIG_TRACING mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", sizeof(*mod->trace_bprintk_fmt_start), &mod->num_trace_bprintk_fmt); #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD /* sechdrs[0].sh_size is always zero */ mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, sizeof(*mod->ftrace_callsites), &mod->num_ftrace_callsites); #endif #ifdef CONFIG_FUNCTION_ERROR_INJECTION mod->ei_funcs = section_objs(info, "_error_injection_whitelist", sizeof(*mod->ei_funcs), &mod->num_ei_funcs); #endif #ifdef CONFIG_KPROBES mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, &mod->kprobes_text_size); mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", sizeof(unsigned long), &mod->num_kprobe_blacklist); #endif #ifdef CONFIG_PRINTK_INDEX mod->printk_index_start = section_objs(info, ".printk_index", sizeof(*mod->printk_index_start), &mod->printk_index_size); #endif #ifdef CONFIG_HAVE_STATIC_CALL_INLINE mod->static_call_sites = section_objs(info, ".static_call_sites", sizeof(*mod->static_call_sites), &mod->num_static_call_sites); #endif #if IS_ENABLED(CONFIG_KUNIT) mod->kunit_suites = section_objs(info, ".kunit_test_suites", 
sizeof(*mod->kunit_suites), &mod->num_kunit_suites); mod->kunit_init_suites = section_objs(info, ".kunit_init_test_suites", sizeof(*mod->kunit_init_suites), &mod->num_kunit_init_suites); #endif mod->extable = section_objs(info, "__ex_table", sizeof(*mod->extable), &mod->num_exentries); if (section_addr(info, "__obsparm")) pr_warn("%s: Ignoring obsolete parameters\n", mod->name); #ifdef CONFIG_DYNAMIC_DEBUG_CORE mod->dyndbg_info.descs = section_objs(info, "__dyndbg", sizeof(*mod->dyndbg_info.descs), &mod->dyndbg_info.num_descs); mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes", sizeof(*mod->dyndbg_info.classes), &mod->dyndbg_info.num_classes); #endif return 0; } static int move_module(struct module *mod, struct load_info *info) { int i; enum mod_mem_type t = 0; int ret = -ENOMEM; for_each_mod_mem_type(type) { if (!mod->mem[type].size) { mod->mem[type].base = NULL; continue; } ret = module_memory_alloc(mod, type); if (ret) { t = type; goto out_enomem; } } /* Transfer each section which specifies SHF_ALLOC */ pr_debug("Final section addresses for %s:\n", mod->name); for (i = 0; i < info->hdr->e_shnum; i++) { void *dest; Elf_Shdr *shdr = &info->sechdrs[i]; enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; if (!(shdr->sh_flags & SHF_ALLOC)) continue; dest = mod->mem[type].base + (shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK); if (shdr->sh_type != SHT_NOBITS) { /* * Our ELF checker already validated this, but let's * be pedantic and make the goal clearer. We actually * end up copying over all modifications made to the * userspace copy of the entire struct module. */ if (i == info->index.mod && (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) { ret = -ENOEXEC; goto out_enomem; } memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); } /* * Update the userspace copy's ELF section address to point to * our newly allocated memory as a pure convenience so that * users of info can keep taking advantage and using the newly * minted official memory area. */ shdr->sh_addr = (unsigned long)dest; pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr, (long)shdr->sh_size, info->secstrings + shdr->sh_name); } return 0; out_enomem: for (t--; t >= 0; t--) module_memory_free(mod, t, true); return ret; } static int check_export_symbol_versions(struct module *mod) { #ifdef CONFIG_MODVERSIONS if ((mod->num_syms && !mod->crcs) || (mod->num_gpl_syms && !mod->gpl_crcs)) { return try_to_force_load(mod, "no versions for exported symbols"); } #endif return 0; } static void flush_module_icache(const struct module *mod) { /* * Flush the instruction cache, since we've played with text. * Do it before processing of module parameters, so the module * can provide parameter accessor functions of its own. 
*/ for_each_mod_mem_type(type) { const struct module_memory *mod_mem = &mod->mem[type]; if (mod_mem->size) { flush_icache_range((unsigned long)mod_mem->base, (unsigned long)mod_mem->base + mod_mem->size); } } } bool __weak module_elf_check_arch(Elf_Ehdr *hdr) { return true; } int __weak module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod) { return 0; } /* module_blacklist is a comma-separated list of module names */ static char *module_blacklist; static bool blacklisted(const char *module_name) { const char *p; size_t len; if (!module_blacklist) return false; for (p = module_blacklist; *p; p += len) { len = strcspn(p, ","); if (strlen(module_name) == len && !memcmp(module_name, p, len)) return true; if (p[len] == ',') len++; } return false; } core_param(module_blacklist, module_blacklist, charp, 0400); static struct module *layout_and_allocate(struct load_info *info, int flags) { struct module *mod; unsigned int ndx; int err; /* Allow arches to frob section contents and sizes. */ err = module_frob_arch_sections(info->hdr, info->sechdrs, info->secstrings, info->mod); if (err < 0) return ERR_PTR(err); err = module_enforce_rwx_sections(info->hdr, info->sechdrs, info->secstrings, info->mod); if (err < 0) return ERR_PTR(err); /* We will do a special allocation for per-cpu sections later. */ info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; /* * Mark ro_after_init section with SHF_RO_AFTER_INIT so that * layout_sections() can put it in the right place. * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. */ ndx = find_sec(info, ".data..ro_after_init"); if (ndx) info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; /* * Mark the __jump_table section as ro_after_init as well: these data * structures are never modified, with the exception of entries that * refer to code in the __init section, which are annotated as such * at module load time. */ ndx = find_sec(info, "__jump_table"); if (ndx) info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; /* * Determine total sizes, and put offsets in sh_entsize. For now * this is done generically; there doesn't appear to be any * special cases for the architectures. */ layout_sections(info->mod, info); layout_symtab(info->mod, info); /* Allocate and move to the final place */ err = move_module(info->mod, info); if (err) return ERR_PTR(err); /* Module has been copied to its final place now: return it. */ mod = (void *)info->sechdrs[info->index.mod].sh_addr; kmemleak_load_module(mod, info); return mod; } /* mod is no longer valid after this! */ static void module_deallocate(struct module *mod, struct load_info *info) { percpu_modfree(mod); module_arch_freeing_init(mod); free_mod_mem(mod, true); } int __weak module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { return 0; } static int post_relocation(struct module *mod, const struct load_info *info) { /* Sort exception table now relocations are done. */ sort_extable(mod->extable, mod->extable + mod->num_exentries); /* Copy relocated percpu area over. */ percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, info->sechdrs[info->index.pcpu].sh_size); /* Setup kallsyms-specific fields. */ add_kallsyms(mod, info); /* Arch-specific module finalizing. */ return module_finalize(info->hdr, info->sechdrs, mod); } /* Call module constructors. 
*/ static void do_mod_ctors(struct module *mod) { #ifdef CONFIG_CONSTRUCTORS unsigned long i; for (i = 0; i < mod->num_ctors; i++) mod->ctors[i](); #endif } /* For freeing module_init on success, in case kallsyms traversing */ struct mod_initfree { struct llist_node node; void *init_text; void *init_data; void *init_rodata; }; static void do_free_init(struct work_struct *w) { struct llist_node *pos, *n, *list; struct mod_initfree *initfree; list = llist_del_all(&init_free_list); synchronize_rcu(); llist_for_each_safe(pos, n, list) { initfree = container_of(pos, struct mod_initfree, node); execmem_free(initfree->init_text); execmem_free(initfree->init_data); execmem_free(initfree->init_rodata); kfree(initfree); } } void flush_module_init_free_work(void) { flush_work(&init_free_wq); } #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "module." /* Default value for module->async_probe_requested */ static bool async_probe; module_param(async_probe, bool, 0644); /* * This is where the real work happens. * * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb * helper command 'lx-symbols'. */ static noinline int do_init_module(struct module *mod) { int ret = 0; struct mod_initfree *freeinit; #if defined(CONFIG_MODULE_STATS) unsigned int text_size = 0, total_size = 0; for_each_mod_mem_type(type) { const struct module_memory *mod_mem = &mod->mem[type]; if (mod_mem->size) { total_size += mod_mem->size; if (type == MOD_TEXT || type == MOD_INIT_TEXT) text_size += mod_mem->size; } } #endif freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); if (!freeinit) { ret = -ENOMEM; goto fail; } freeinit->init_text = mod->mem[MOD_INIT_TEXT].base; freeinit->init_data = mod->mem[MOD_INIT_DATA].base; freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base; do_mod_ctors(mod); /* Start the module */ if (mod->init != NULL) ret = do_one_initcall(mod->init); if (ret < 0) { goto fail_free_freeinit; } if (ret > 0) { pr_warn("%s: '%s'->init suspiciously returned %d, it should " "follow 0/-E convention\n" "%s: loading module anyway...\n", __func__, mod->name, ret, __func__); dump_stack(); } /* Now it's a first class citizen! */ mod->state = MODULE_STATE_LIVE; blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_LIVE, mod); /* Delay uevent until module has finished its init routine */ kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); /* * We need to finish all async code before the module init sequence * is done. This has potential to deadlock if synchronous module * loading is requested from async (which is not allowed!). * * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous * request_module() from async workers") for more details. */ if (!mod->async_probe_requested) async_synchronize_full(); ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base, mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size); mutex_lock(&module_mutex); /* Drop initial reference. */ module_put(mod); trim_init_extable(mod); #ifdef CONFIG_KALLSYMS /* Switch to core kallsyms now init is done: kallsyms may be walking! 
*/ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); #endif ret = module_enable_rodata_ro(mod, true); if (ret) goto fail_mutex_unlock; mod_tree_remove_init(mod); module_arch_freeing_init(mod); for_class_mod_mem_type(type, init) { mod->mem[type].base = NULL; mod->mem[type].size = 0; } #ifdef CONFIG_DEBUG_INFO_BTF_MODULES /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */ mod->btf_data = NULL; mod->btf_base_data = NULL; #endif /* * We want to free module_init, but be aware that kallsyms may be * walking this with preempt disabled. In all the failure paths, we * call synchronize_rcu(), but we don't want to slow down the success * path. execmem_free() cannot be called in an interrupt, so do the * work and call synchronize_rcu() in a work queue. * * Note that execmem_alloc() on most architectures creates W+X page * mappings which won't be cleaned up until do_free_init() runs. Any * code such as mark_rodata_ro() which depends on those mappings to * be cleaned up needs to sync with the queued work by invoking * flush_module_init_free_work(). */ if (llist_add(&freeinit->node, &init_free_list)) schedule_work(&init_free_wq); mutex_unlock(&module_mutex); wake_up_all(&module_wq); mod_stat_add_long(text_size, &total_text_size); mod_stat_add_long(total_size, &total_mod_size); mod_stat_inc(&modcount); return 0; fail_mutex_unlock: mutex_unlock(&module_mutex); fail_free_freeinit: kfree(freeinit); fail: /* Try to protect us from buggy refcounters. */ mod->state = MODULE_STATE_GOING; synchronize_rcu(); module_put(mod); blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_GOING, mod); klp_module_going(mod); ftrace_release_mod(mod); free_module(mod); wake_up_all(&module_wq); return ret; } static int may_init_module(void) { if (!capable(CAP_SYS_MODULE) || modules_disabled) return -EPERM; return 0; } /* Is this module of this name done loading? No locks held. */ static bool finished_loading(const char *name) { struct module *mod; bool ret; /* * The module_mutex should not be a heavily contended lock; * if we get the occasional sleep here, we'll go an extra iteration * in the wait_event_interruptible(), which is harmless. */ sched_annotate_sleep(); mutex_lock(&module_mutex); mod = find_module_all(name, strlen(name), true); ret = !mod || mod->state == MODULE_STATE_LIVE || mod->state == MODULE_STATE_GOING; mutex_unlock(&module_mutex); return ret; } /* Must be called with module_mutex held */ static int module_patient_check_exists(const char *name, enum fail_dup_mod_reason reason) { struct module *old; int err = 0; old = find_module_all(name, strlen(name), true); if (old == NULL) return 0; if (old->state == MODULE_STATE_COMING || old->state == MODULE_STATE_UNFORMED) { /* Wait in case it fails to load. */ mutex_unlock(&module_mutex); err = wait_event_interruptible(module_wq, finished_loading(name)); mutex_lock(&module_mutex); if (err) return err; /* The module might have gone in the meantime. */ old = find_module_all(name, strlen(name), true); } if (try_add_failed_module(name, reason)) pr_warn("Could not add fail-tracking for module: %s\n", name); /* * We are here only when the same module was being loaded. Do * not try to load it again right now. It prevents long delays * caused by serialized module load failures. It might happen * when more devices of the same type trigger load of * a particular module. 
*/ if (old && old->state == MODULE_STATE_LIVE) return -EEXIST; return -EBUSY; } /* * We try to place it in the list now to make sure it's unique before * we dedicate too many resources. In particular, temporary percpu * memory exhaustion. */ static int add_unformed_module(struct module *mod) { int err; mod->state = MODULE_STATE_UNFORMED; mutex_lock(&module_mutex); err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD); if (err) goto out; mod_update_bounds(mod); list_add_rcu(&mod->list, &modules); mod_tree_insert(mod); err = 0; out: mutex_unlock(&module_mutex); return err; } static int complete_formation(struct module *mod, struct load_info *info) { int err; mutex_lock(&module_mutex); /* Find duplicate symbols (must be called under lock). */ err = verify_exported_symbols(mod); if (err < 0) goto out; /* These rely on module_mutex for list integrity. */ module_bug_finalize(info->hdr, info->sechdrs, mod); module_cfi_finalize(info->hdr, info->sechdrs, mod); err = module_enable_rodata_ro(mod, false); if (err) goto out_strict_rwx; err = module_enable_data_nx(mod); if (err) goto out_strict_rwx; err = module_enable_text_rox(mod); if (err) goto out_strict_rwx; /* * Mark state as coming so strong_try_module_get() ignores us, * but kallsyms etc. can see us. */ mod->state = MODULE_STATE_COMING; mutex_unlock(&module_mutex); return 0; out_strict_rwx: module_bug_cleanup(mod); out: mutex_unlock(&module_mutex); return err; } static int prepare_coming_module(struct module *mod) { int err; ftrace_module_enable(mod); err = klp_module_coming(mod); if (err) return err; err = blocking_notifier_call_chain_robust(&module_notify_list, MODULE_STATE_COMING, MODULE_STATE_GOING, mod); err = notifier_to_errno(err); if (err) klp_module_going(mod); return err; } static int unknown_module_param_cb(char *param, char *val, const char *modname, void *arg) { struct module *mod = arg; int ret; if (strcmp(param, "async_probe") == 0) { if (kstrtobool(val, &mod->async_probe_requested)) mod->async_probe_requested = true; return 0; } /* Check for magic 'dyndbg' arg */ ret = ddebug_dyndbg_module_param_cb(param, val, modname); if (ret != 0) pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); return 0; } /* Module within temporary copy, this doesn't do any allocation */ static int early_mod_check(struct load_info *info, int flags) { int err; /* * Now that we know we have the correct module name, check * if it's blacklisted. */ if (blacklisted(info->name)) { pr_err("Module %s is blacklisted\n", info->name); return -EPERM; } err = rewrite_section_headers(info, flags); if (err) return err; /* Check module struct version now, before we try to use module. */ if (!check_modstruct_version(info, info->mod)) return -ENOEXEC; err = check_modinfo(info->mod, info, flags); if (err) return err; mutex_lock(&module_mutex); err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING); mutex_unlock(&module_mutex); return err; } /* * Allocate and load the module: note that size of section 0 is always * zero, and we rely on this for optional sections. */ static int load_module(struct load_info *info, const char __user *uargs, int flags) { struct module *mod; bool module_allocated = false; long err = 0; char *after_dashes; /* * Do the signature check (if any) first. All that * the signature check needs is info->len, it does * not need any of the section info. That can be * set up later. This will minimize the chances * of a corrupt module causing problems before * we even get to the signature check. 
* * The check will also adjust info->len by stripping * off the sig length at the end of the module, making * checks against info->len more correct. */ err = module_sig_check(info, flags); if (err) goto free_copy; /* * Do basic sanity checks against the ELF header and * sections. Cache useful sections and set the * info->mod to the userspace passed struct module. */ err = elf_validity_cache_copy(info, flags); if (err) goto free_copy; err = early_mod_check(info, flags); if (err) goto free_copy; /* Figure out module layout, and allocate all the memory. */ mod = layout_and_allocate(info, flags); if (IS_ERR(mod)) { err = PTR_ERR(mod); goto free_copy; } module_allocated = true; audit_log_kern_module(mod->name); /* Reserve our place in the list. */ err = add_unformed_module(mod); if (err) goto free_module; /* * We are tainting your kernel if your module gets into * the modules linked list somehow. */ module_augment_kernel_taints(mod, info); /* To avoid stressing percpu allocator, do this once we're unique. */ err = percpu_modalloc(mod, info); if (err) goto unlink_mod; /* Now module is in final location, initialize linked lists, etc. */ err = module_unload_init(mod); if (err) goto unlink_mod; init_param_lock(mod); /* * Now we've got everything in the final locations, we can * find optional sections. */ err = find_module_sections(mod, info); if (err) goto free_unload; err = check_export_symbol_versions(mod); if (err) goto free_unload; /* Set up MODINFO_ATTR fields */ setup_modinfo(mod, info); /* Fix up syms, so that st_value is a pointer to location. */ err = simplify_symbols(mod, info); if (err < 0) goto free_modinfo; err = apply_relocations(mod, info); if (err < 0) goto free_modinfo; err = post_relocation(mod, info); if (err < 0) goto free_modinfo; flush_module_icache(mod); /* Now copy in args */ mod->args = strndup_user(uargs, ~0UL >> 1); if (IS_ERR(mod->args)) { err = PTR_ERR(mod->args); goto free_arch_cleanup; } init_build_id(mod, info); /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ ftrace_module_init(mod); /* Finally it's fully formed, ready to start executing. */ err = complete_formation(mod, info); if (err) goto ddebug_cleanup; err = prepare_coming_module(mod); if (err) goto bug_cleanup; mod->async_probe_requested = async_probe; /* Module is ready to execute: parsing args may do that. */ after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, -32768, 32767, mod, unknown_module_param_cb); if (IS_ERR(after_dashes)) { err = PTR_ERR(after_dashes); goto coming_cleanup; } else if (after_dashes) { pr_warn("%s: parameters '%s' after `--' ignored\n", mod->name, after_dashes); } /* Link in to sysfs. */ err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); if (err < 0) goto coming_cleanup; if (is_livepatch_module(mod)) { err = copy_module_elf(mod, info); if (err < 0) goto sysfs_cleanup; } /* Get rid of temporary copy. */ free_copy(info, flags); codetag_load_module(mod); /* Done! 
*/ trace_module_load(mod); return do_init_module(mod); sysfs_cleanup: mod_sysfs_teardown(mod); coming_cleanup: mod->state = MODULE_STATE_GOING; destroy_params(mod->kp, mod->num_kp); blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_GOING, mod); klp_module_going(mod); bug_cleanup: mod->state = MODULE_STATE_GOING; /* module_bug_cleanup needs module_mutex protection */ mutex_lock(&module_mutex); module_bug_cleanup(mod); mutex_unlock(&module_mutex); ddebug_cleanup: ftrace_release_mod(mod); synchronize_rcu(); kfree(mod->args); free_arch_cleanup: module_arch_cleanup(mod); free_modinfo: free_modinfo(mod); free_unload: module_unload_free(mod); unlink_mod: mutex_lock(&module_mutex); /* Unlink carefully: kallsyms could be walking list. */ list_del_rcu(&mod->list); mod_tree_remove(mod); wake_up_all(&module_wq); /* Wait for RCU-sched synchronizing before releasing mod->list. */ synchronize_rcu(); mutex_unlock(&module_mutex); free_module: mod_stat_bump_invalid(info, flags); /* Free lock-classes; relies on the preceding sync_rcu() */ for_class_mod_mem_type(type, core_data) { lockdep_free_key_range(mod->mem[type].base, mod->mem[type].size); } module_deallocate(mod, info); free_copy: /* * The info->len is always set. We distinguish between * failures once the proper module was allocated and * before that. */ if (!module_allocated) mod_stat_bump_becoming(info, flags); free_copy(info, flags); return err; } SYSCALL_DEFINE3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs) { int err; struct load_info info = { }; err = may_init_module(); if (err) return err; pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", umod, len, uargs); err = copy_module_from_user(umod, len, &info); if (err) { mod_stat_inc(&failed_kreads); mod_stat_add_long(len, &invalid_kread_bytes); return err; } return load_module(&info, uargs, 0); } struct idempotent { const void *cookie; struct hlist_node entry; struct completion complete; int ret; }; #define IDEM_HASH_BITS 8 static struct hlist_head idem_hash[1 << IDEM_HASH_BITS]; static DEFINE_SPINLOCK(idem_lock); static bool idempotent(struct idempotent *u, const void *cookie) { int hash = hash_ptr(cookie, IDEM_HASH_BITS); struct hlist_head *head = idem_hash + hash; struct idempotent *existing; bool first; u->ret = -EINTR; u->cookie = cookie; init_completion(&u->complete); spin_lock(&idem_lock); first = true; hlist_for_each_entry(existing, head, entry) { if (existing->cookie != cookie) continue; first = false; break; } hlist_add_head(&u->entry, idem_hash + hash); spin_unlock(&idem_lock); return !first; } /* * We were the first one with 'cookie' on the list, and we ended * up completing the operation. We now need to walk the list, * remove everybody - which includes ourselves - fill in the return * value, and then complete the operation. */ static int idempotent_complete(struct idempotent *u, int ret) { const void *cookie = u->cookie; int hash = hash_ptr(cookie, IDEM_HASH_BITS); struct hlist_head *head = idem_hash + hash; struct hlist_node *next; struct idempotent *pos; spin_lock(&idem_lock); hlist_for_each_entry_safe(pos, next, head, entry) { if (pos->cookie != cookie) continue; hlist_del_init(&pos->entry); pos->ret = ret; complete(&pos->complete); } spin_unlock(&idem_lock); return ret; } /* * Wait for the idempotent worker. * * If we get interrupted, we need to remove ourselves from the * the idempotent list, and the completion may still come in. 
* * The 'idem_lock' protects against the race, and 'idem.ret' was * initialized to -EINTR and is thus always the right return * value even if the idempotent work then completes between * the wait_for_completion and the cleanup. */ static int idempotent_wait_for_completion(struct idempotent *u) { if (wait_for_completion_interruptible(&u->complete)) { spin_lock(&idem_lock); if (!hlist_unhashed(&u->entry)) hlist_del(&u->entry); spin_unlock(&idem_lock); } return u->ret; } static int init_module_from_file(struct file *f, const char __user * uargs, int flags) { struct load_info info = { }; void *buf = NULL; int len; len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE); if (len < 0) { mod_stat_inc(&failed_kreads); return len; } if (flags & MODULE_INIT_COMPRESSED_FILE) { int err = module_decompress(&info, buf, len); vfree(buf); /* compressed data is no longer needed */ if (err) { mod_stat_inc(&failed_decompress); mod_stat_add_long(len, &invalid_decompress_bytes); return err; } } else { info.hdr = buf; info.len = len; } return load_module(&info, uargs, flags); } static int idempotent_init_module(struct file *f, const char __user * uargs, int flags) { struct idempotent idem; if (!f || !(f->f_mode & FMODE_READ)) return -EBADF; /* Are we the winners of the race and get to do this? */ if (!idempotent(&idem, file_inode(f))) { int ret = init_module_from_file(f, uargs, flags); return idempotent_complete(&idem, ret); } /* * Somebody else won the race and is loading the module. */ return idempotent_wait_for_completion(&idem); } SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) { int err; struct fd f; err = may_init_module(); if (err) return err; pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS |MODULE_INIT_IGNORE_VERMAGIC |MODULE_INIT_COMPRESSED_FILE)) return -EINVAL; f = fdget(fd); err = idempotent_init_module(f.file, uargs, flags); fdput(f); return err; } /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ char *module_flags(struct module *mod, char *buf, bool show_state) { int bx = 0; BUG_ON(mod->state == MODULE_STATE_UNFORMED); if (!mod->taints && !show_state) goto out; if (mod->taints || mod->state == MODULE_STATE_GOING || mod->state == MODULE_STATE_COMING) { buf[bx++] = '('; bx += module_flags_taint(mod->taints, buf + bx); /* Show a - for module-is-being-unloaded */ if (mod->state == MODULE_STATE_GOING && show_state) buf[bx++] = '-'; /* Show a + for module-is-being-loaded */ if (mod->state == MODULE_STATE_COMING && show_state) buf[bx++] = '+'; buf[bx++] = ')'; } out: buf[bx] = '\0'; return buf; } /* Given an address, look for it in the module exception tables. */ const struct exception_table_entry *search_module_extables(unsigned long addr) { const struct exception_table_entry *e = NULL; struct module *mod; preempt_disable(); mod = __module_address(addr); if (!mod) goto out; if (!mod->num_exentries) goto out; e = search_extable(mod->extable, mod->num_exentries, addr); out: preempt_enable(); /* * Now, if we found one, we are running inside it now, hence * we cannot unload the module, hence no refcnt needed. */ return e; } /** * is_module_address() - is this address inside a module? * @addr: the address to check. * * See is_module_text_address() if you simply want to see if the address * is code (not data). 
*/ bool is_module_address(unsigned long addr) { bool ret; preempt_disable(); ret = __module_address(addr) != NULL; preempt_enable(); return ret; } /** * __module_address() - get the module which contains an address. * @addr: the address. * * Must be called with preempt disabled or module mutex held so that * module doesn't get freed during this. */ struct module *__module_address(unsigned long addr) { struct module *mod; if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max) goto lookup; #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max) goto lookup; #endif return NULL; lookup: module_assert_mutex_or_preempt(); mod = mod_find(addr, &mod_tree); if (mod) { BUG_ON(!within_module(addr, mod)); if (mod->state == MODULE_STATE_UNFORMED) mod = NULL; } return mod; } /** * is_module_text_address() - is this address inside module code? * @addr: the address to check. * * See is_module_address() if you simply want to see if the address is * anywhere in a module. See kernel_text_address() for testing if an * address corresponds to kernel or module code. */ bool is_module_text_address(unsigned long addr) { bool ret; preempt_disable(); ret = __module_text_address(addr) != NULL; preempt_enable(); return ret; } /** * __module_text_address() - get the module whose code contains an address. * @addr: the address. * * Must be called with preempt disabled or module mutex held so that * module doesn't get freed during this. */ struct module *__module_text_address(unsigned long addr) { struct module *mod = __module_address(addr); if (mod) { /* Make sure it's within the text section. */ if (!within_module_mem_type(addr, mod, MOD_TEXT) && !within_module_mem_type(addr, mod, MOD_INIT_TEXT)) mod = NULL; } return mod; } /* Don't grab lock, we're oopsing. */ void print_modules(void) { struct module *mod; char buf[MODULE_FLAGS_BUF_SIZE]; printk(KERN_DEFAULT "Modules linked in:"); /* Most callers should already have preempt disabled, but make sure */ preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; pr_cont(" %s%s", mod->name, module_flags(mod, buf, true)); } print_unloaded_tainted_modules(); preempt_enable(); if (last_unloaded_module.name[0]) pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, last_unloaded_module.taints); pr_cont("\n"); } #ifdef CONFIG_MODULE_DEBUGFS struct dentry *mod_debugfs_root; static int module_debugfs_init(void) { mod_debugfs_root = debugfs_create_dir("modules", NULL); return 0; } module_init(module_debugfs_init); #endif
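/*
 * Illustrative sketch, not part of the kernel listing above: a standalone
 * userspace program mirroring how module_next_tag_pair() and
 * get_next_modinfo() walk the .modinfo section, i.e. a blob of
 * NUL-terminated "tag=value" strings with optional zero padding between
 * them.  All demo_* names and the sample blob are invented for
 * illustration; only the scanning logic is taken from the code above.
 */
#include <stdio.h>
#include <string.h>

static char *demo_next_tag_pair(char *string, unsigned long *secsize)
{
	/* Skip over the current string's non-zero chars. */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	/* Skip any zero padding up to the next string. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}

static char *demo_get_modinfo(char *blob, unsigned long size, const char *tag)
{
	unsigned long taglen = strlen(tag);
	char *p;

	for (p = blob; p; p = demo_next_tag_pair(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}

int main(void)
{
	/* Fake .modinfo contents: tag=value strings separated by NULs. */
	char blob[] = "license=GPL\0author=Jane\0\0vermagic=6.9.0 SMP";
	char *missing;

	printf("license  : %s\n", demo_get_modinfo(blob, sizeof(blob), "license"));
	printf("vermagic : %s\n", demo_get_modinfo(blob, sizeof(blob), "vermagic"));
	missing = demo_get_modinfo(blob, sizeof(blob), "intree");
	printf("intree   : %s\n", missing ? missing : "(not set)");
	return 0;
}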
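/*
 * Illustrative sketch, not kernel code: layout_sections() above records two
 * facts in each section's sh_entsize -- the target module memory region in
 * the high bits and the byte offset inside that region in the low bits --
 * and move_module() later unpacks both.  The userspace demo below shows the
 * same pack/unpack trick; DEMO_TYPE_SHIFT and DEMO_OFFSET_MASK are made-up
 * stand-ins, not the kernel's SH_ENTSIZE_* constants.
 */
#include <assert.h>
#include <stdio.h>

enum demo_mem_type { DEMO_TEXT, DEMO_RODATA, DEMO_DATA, DEMO_INIT_TEXT };

#define DEMO_TYPE_SHIFT  56
#define DEMO_OFFSET_MASK ((1ULL << DEMO_TYPE_SHIFT) - 1)

static unsigned long long demo_pack(enum demo_mem_type type,
				    unsigned long long offset)
{
	/* The offset must not spill into the type bits. */
	assert((offset & ~DEMO_OFFSET_MASK) == 0);
	return offset | ((unsigned long long)type << DEMO_TYPE_SHIFT);
}

static enum demo_mem_type demo_type(unsigned long long entsize)
{
	return (enum demo_mem_type)(entsize >> DEMO_TYPE_SHIFT);
}

static unsigned long long demo_offset(unsigned long long entsize)
{
	return entsize & DEMO_OFFSET_MASK;
}

int main(void)
{
	unsigned long long e = demo_pack(DEMO_RODATA, 0x1240);

	printf("packed : %#llx\n", e);
	printf("type   : %d (expect %d)\n", demo_type(e), DEMO_RODATA);
	printf("offset : %#llx (expect 0x1240)\n", demo_offset(e));
	return 0;
}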
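/*
 * Illustrative sketch, not kernel code: the idempotent(), idempotent_complete()
 * and idempotent_wait_for_completion() trio above lets concurrent
 * finit_module() calls on the same file cookie share one load attempt -- the
 * first caller does the work, everyone else waits and receives the same
 * return value.  Below is a minimal userspace analogue using a pthread mutex
 * and condition variable in place of the kernel's spinlock and struct
 * completion, and a plain list in place of the hash table.  All names
 * (idem_start, idem_complete, idem_wait, loader), the sleep and the cookie
 * are invented; only concurrent callers are deduplicated, as in the kernel.
 * Assumed build: cc demo.c -pthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct idem_waiter {
	const void *cookie;
	struct idem_waiter *next;
	bool done;
	int ret;
};

static pthread_mutex_t idem_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t idem_cond = PTHREAD_COND_INITIALIZER;
static struct idem_waiter *idem_list;

/* Register interest in 'cookie'; true means someone else already owns it. */
static bool idem_start(struct idem_waiter *u, const void *cookie)
{
	struct idem_waiter *pos;
	bool someone_else = false;

	u->cookie = cookie;
	u->done = false;
	u->ret = -1;

	pthread_mutex_lock(&idem_lock);
	for (pos = idem_list; pos; pos = pos->next) {
		if (pos->cookie == cookie) {
			someone_else = true;
			break;
		}
	}
	u->next = idem_list;
	idem_list = u;
	pthread_mutex_unlock(&idem_lock);
	return someone_else;
}

/* The winner finished: hand 'ret' to every waiter parked on this cookie. */
static int idem_complete(const void *cookie, int ret)
{
	struct idem_waiter **pp, *pos;

	pthread_mutex_lock(&idem_lock);
	pp = &idem_list;
	while ((pos = *pp)) {
		if (pos->cookie == cookie) {
			*pp = pos->next;
			pos->ret = ret;
			pos->done = true;
		} else {
			pp = &pos->next;
		}
	}
	pthread_cond_broadcast(&idem_cond);
	pthread_mutex_unlock(&idem_lock);
	return ret;
}

static int idem_wait(struct idem_waiter *u)
{
	pthread_mutex_lock(&idem_lock);
	while (!u->done)
		pthread_cond_wait(&idem_cond, &idem_lock);
	pthread_mutex_unlock(&idem_lock);
	return u->ret;
}

static const char *file_cookie = "same-module.ko";

static void *loader(void *arg)
{
	struct idem_waiter u;
	int ret;

	if (!idem_start(&u, file_cookie)) {
		/* We won the race: do the (pretend) expensive load once. */
		usleep(100 * 1000);
		ret = idem_complete(file_cookie, 0);
	} else {
		/* Somebody else is loading the same file: just wait. */
		ret = idem_wait(&u);
	}
	printf("thread %ld: load returned %d\n", (long)(intptr_t)arg, ret);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, loader, (void *)(intptr_t)i);
	for (long i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}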
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>

#include "util.h"

/*
 * The work queue is used to avoid the cost of synchronize_rcu in kern_unmount.
 */
static void free_ipc(struct work_struct *unused);
static DECLARE_WORK(free_ipc_work, free_ipc);

static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
}

static void dec_ipc_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
}

static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
					   struct ipc_namespace *old_ns)
{
	struct ipc_namespace *ns;
	struct ucounts *ucounts;
	int err;

	err = -ENOSPC;
again:
	ucounts = inc_ipc_namespaces(user_ns);
	if (!ucounts) {
		/*
		 * IPC namespaces are freed asynchronously, by free_ipc_work.
		 * If frees were pending, flush_work will wait, and
		 * return true. Fail the allocation if no frees are pending.
		 */
		if (flush_work(&free_ipc_work))
			goto again;
		goto fail;
	}

	err = -ENOMEM;
	ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT);
	if (ns == NULL)
		goto fail_dec;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto fail_free;
	ns->ns.ops = &ipcns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;

	err = mq_init_ns(ns);
	if (err)
		goto fail_put;

	err = -ENOMEM;
	if (!setup_mq_sysctls(ns))
		goto fail_put;

	if (!setup_ipc_sysctls(ns))
		goto fail_mq;

	err = msg_init_ns(ns);
	if (err)
		goto fail_put;

	sem_init_ns(ns);
	shm_init_ns(ns);

	return ns;

fail_mq:
	retire_mq_sysctls(ns);

fail_put:
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
fail_free:
	kfree(ns);
fail_dec:
	dec_ipc_namespaces(ucounts);
fail:
	return ERR_PTR(err);
}

struct ipc_namespace *copy_ipcs(unsigned long flags,
	struct user_namespace *user_ns, struct ipc_namespace *ns)
{
	if (!(flags & CLONE_NEWIPC))
		return get_ipc_ns(ns);
	return create_ipc_ns(user_ns, ns);
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns: the namespace to remove the ipcs from
 * @ids: the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
*/ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)) { struct kern_ipc_perm *perm; int next_id; int total, in_use; down_write(&ids->rwsem); in_use = ids->in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { perm = idr_find(&ids->ipcs_idr, next_id); if (perm == NULL) continue; rcu_read_lock(); ipc_lock_object(perm); free(ns, perm); total++; } up_write(&ids->rwsem); } static void free_ipc_ns(struct ipc_namespace *ns) { /* * Caller needs to wait for an RCU grace period to have passed * after making the mount point inaccessible to new accesses. */ mntput(ns->mq_mnt); sem_exit_ns(ns); msg_exit_ns(ns); shm_exit_ns(ns); retire_mq_sysctls(ns); retire_ipc_sysctls(ns); dec_ipc_namespaces(ns->ucounts); put_user_ns(ns->user_ns); ns_free_inum(&ns->ns); kfree(ns); } static LLIST_HEAD(free_ipc_list); static void free_ipc(struct work_struct *unused) { struct llist_node *node = llist_del_all(&free_ipc_list); struct ipc_namespace *n, *t; llist_for_each_entry_safe(n, t, node, mnt_llist) mnt_make_shortterm(n->mq_mnt); /* Wait for any last users to have gone away. */ synchronize_rcu(); llist_for_each_entry_safe(n, t, node, mnt_llist) free_ipc_ns(n); } /* * put_ipc_ns - drop a reference to an ipc namespace. * @ns: the namespace to put * * If this is the last task in the namespace exiting, and * it is dropping the refcount to 0, then it can race with * a task in another ipc namespace but in a mounts namespace * which has this ipcns's mqueuefs mounted, doing some action * with one of the mqueuefs files. That can raise the refcount. * So dropping the refcount, and raising the refcount when * accessing it through the VFS, are protected with mq_lock. * * (Clearly, a task raising the refcount on its own ipc_ns * needn't take mq_lock since it can't race with the last task * in the ipcns exiting). */ void put_ipc_ns(struct ipc_namespace *ns) { if (refcount_dec_and_lock(&ns->ns.count, &mq_lock)) { mq_clear_sbinfo(ns); spin_unlock(&mq_lock); if (llist_add(&ns->mnt_llist, &free_ipc_list)) schedule_work(&free_ipc_work); } } static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns) { return container_of(ns, struct ipc_namespace, ns); } static struct ns_common *ipcns_get(struct task_struct *task) { struct ipc_namespace *ns = NULL; struct nsproxy *nsproxy; task_lock(task); nsproxy = task->nsproxy; if (nsproxy) ns = get_ipc_ns(nsproxy->ipc_ns); task_unlock(task); return ns ? &ns->ns : NULL; } static void ipcns_put(struct ns_common *ns) { return put_ipc_ns(to_ipc_ns(ns)); } static int ipcns_install(struct nsset *nsset, struct ns_common *new) { struct nsproxy *nsproxy = nsset->nsproxy; struct ipc_namespace *ns = to_ipc_ns(new); if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) || !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN)) return -EPERM; put_ipc_ns(nsproxy->ipc_ns); nsproxy->ipc_ns = get_ipc_ns(ns); return 0; } static struct user_namespace *ipcns_owner(struct ns_common *ns) { return to_ipc_ns(ns)->user_ns; } const struct proc_ns_operations ipcns_operations = { .name = "ipc", .type = CLONE_NEWIPC, .get = ipcns_get, .put = ipcns_put, .install = ipcns_install, .owner = ipcns_owner, };
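For illustration only (userspace C, not part of the file above): a minimal program that exercises the copy_ipcs() -> create_ipc_ns() path via unshare(CLONE_NEWIPC). It assumes the caller has CAP_SYS_ADMIN in its user namespace; the shmget() call merely shows that SysV objects created afterwards land in the new, initially empty namespace.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* Kernel side: copy_ipcs() sees CLONE_NEWIPC and calls create_ipc_ns(). */
	if (unshare(CLONE_NEWIPC) != 0) {
		perror("unshare(CLONE_NEWIPC)");
		return 1;
	}

	/* Any SysV IPC object created from here on lives in the new namespace. */
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

	printf("shmid in new ipc namespace: %d\n", id);
	return 0;
}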
6 6 6 6 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2014 Protonic Holland, * David Jander * Copyright (C) 2014-2021, 2023 Pengutronix, * Marc Kleine-Budde <kernel@pengutronix.de> */ #include <linux/can/dev.h> #include <linux/can/rx-offload.h> struct can_rx_offload_cb { u32 timestamp; }; static inline struct can_rx_offload_cb * can_rx_offload_get_cb(struct sk_buff *skb) { BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb)); return (struct can_rx_offload_cb *)skb->cb; } static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b) { if (offload->inc) return a <= b; else return a >= b; } static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) { if (offload->inc) return (*val)++; else return (*val)--; } static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) { struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi); struct net_device *dev = offload->dev; struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; int work_done = 0; while ((work_done < quota) && (skb = skb_dequeue(&offload->skb_queue))) { struct can_frame *cf = (struct can_frame *)skb->data; work_done++; if (!(cf->can_id & CAN_ERR_FLAG)) { stats->rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; } netif_receive_skb(skb); } if (work_done < quota) { napi_complete_done(napi, work_done); /* Check if there was another interrupt */ if (!skb_queue_empty(&offload->skb_queue)) napi_schedule(&offload->napi); } return work_done; } static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, int (*compare)(struct sk_buff *a, struct sk_buff *b)) { struct sk_buff *pos, *insert = NULL; skb_queue_reverse_walk(head, pos) { const struct can_rx_offload_cb *cb_pos, *cb_new; cb_pos = can_rx_offload_get_cb(pos); cb_new = can_rx_offload_get_cb(new); netdev_dbg(new->dev, "%s: 
pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n", __func__, cb_pos->timestamp, cb_new->timestamp, cb_new->timestamp - cb_pos->timestamp, skb_queue_len(head)); if (compare(pos, new) < 0) continue; insert = pos; break; } if (!insert) __skb_queue_head(head, new); else __skb_queue_after(head, insert, new); } static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) { const struct can_rx_offload_cb *cb_a, *cb_b; cb_a = can_rx_offload_get_cb(a); cb_b = can_rx_offload_get_cb(b); /* Subtract two u32 and return result as int, to keep * difference steady around the u32 overflow. */ return cb_b->timestamp - cb_a->timestamp; } /** * can_rx_offload_offload_one() - Read one CAN frame from HW * @offload: pointer to rx_offload context * @n: number of mailbox to read * * The task of this function is to read a CAN frame from mailbox @n * from the device and return the mailbox's content as a struct * sk_buff. * * If the struct can_rx_offload::skb_queue exceeds the maximal queue * length (struct can_rx_offload::skb_queue_len_max) or no skb can be * allocated, the mailbox contents is discarded by reading it into an * overflow buffer. This way the mailbox is marked as free by the * driver. * * Return: A pointer to skb containing the CAN frame on success. * * NULL if the mailbox @n is empty. * * ERR_PTR() in case of an error */ static struct sk_buff * can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) { struct sk_buff *skb; struct can_rx_offload_cb *cb; bool drop = false; u32 timestamp; /* If queue is full drop frame */ if (unlikely(skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max)) drop = true; skb = offload->mailbox_read(offload, n, &timestamp, drop); /* Mailbox was empty. */ if (unlikely(!skb)) return NULL; /* There was a problem reading the mailbox, propagate * error value. */ if (IS_ERR(skb)) { offload->dev->stats.rx_dropped++; offload->dev->stats.rx_fifo_errors++; return skb; } /* Mailbox was read. 
*/ cb = can_rx_offload_get_cb(skb); cb->timestamp = timestamp; return skb; } int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending) { unsigned int i; int received = 0; for (i = offload->mb_first; can_rx_offload_le(offload, i, offload->mb_last); can_rx_offload_inc(offload, &i)) { struct sk_buff *skb; if (!(pending & BIT_ULL(i))) continue; skb = can_rx_offload_offload_one(offload, i); if (IS_ERR_OR_NULL(skb)) continue; __skb_queue_add_sort(&offload->skb_irq_queue, skb, can_rx_offload_compare); received++; } return received; } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) { struct sk_buff *skb; int received = 0; while (1) { skb = can_rx_offload_offload_one(offload, 0); if (IS_ERR(skb)) continue; if (!skb) break; __skb_queue_tail(&offload->skb_irq_queue, skb); received++; } return received; } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp) { struct can_rx_offload_cb *cb; if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) { dev_kfree_skb_any(skb); return -ENOBUFS; } cb = can_rx_offload_get_cb(skb); cb->timestamp = timestamp; __skb_queue_add_sort(&offload->skb_irq_queue, skb, can_rx_offload_compare); return 0; } EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp); unsigned int can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload, unsigned int idx, u32 timestamp, unsigned int *frame_len_ptr) { struct net_device *dev = offload->dev; struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; unsigned int len; int err; skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr); if (!skb) return 0; err = can_rx_offload_queue_timestamp(offload, skb, timestamp); if (err) { stats->rx_errors++; stats->tx_fifo_errors++; } return len; } EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp); int can_rx_offload_queue_tail(struct can_rx_offload *offload, struct sk_buff *skb) { if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) { dev_kfree_skb_any(skb); return -ENOBUFS; } __skb_queue_tail(&offload->skb_irq_queue, skb); return 0; } EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); unsigned int can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload, unsigned int idx, unsigned int *frame_len_ptr) { struct net_device *dev = offload->dev; struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; unsigned int len; int err; skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr); if (!skb) return 0; err = can_rx_offload_queue_tail(offload, skb); if (err) { stats->rx_errors++; stats->tx_fifo_errors++; } return len; } EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail); void can_rx_offload_irq_finish(struct can_rx_offload *offload) { unsigned long flags; int queue_len; if (skb_queue_empty_lockless(&offload->skb_irq_queue)) return; spin_lock_irqsave(&offload->skb_queue.lock, flags); skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); spin_unlock_irqrestore(&offload->skb_queue.lock, flags); queue_len = skb_queue_len(&offload->skb_queue); if (queue_len > offload->skb_queue_len_max / 8) netdev_dbg(offload->dev, "%s: queue_len=%d\n", __func__, queue_len); napi_schedule(&offload->napi); } EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish); void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload) { unsigned long flags; int queue_len; if (skb_queue_empty_lockless(&offload->skb_irq_queue)) 
return; spin_lock_irqsave(&offload->skb_queue.lock, flags); skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); spin_unlock_irqrestore(&offload->skb_queue.lock, flags); queue_len = skb_queue_len(&offload->skb_queue); if (queue_len > offload->skb_queue_len_max / 8) netdev_dbg(offload->dev, "%s: queue_len=%d\n", __func__, queue_len); local_bh_disable(); napi_schedule(&offload->napi); local_bh_enable(); } EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish); static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) { offload->dev = dev; /* Limit queue len to 4x the weight (rounded to next power of two) */ offload->skb_queue_len_max = 2 << fls(weight); offload->skb_queue_len_max *= 4; skb_queue_head_init(&offload->skb_queue); __skb_queue_head_init(&offload->skb_irq_queue); netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll, weight); dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n", __func__, offload->skb_queue_len_max); return 0; } int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload) { unsigned int weight; if (offload->mb_first > BITS_PER_LONG_LONG || offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) return -EINVAL; if (offload->mb_first < offload->mb_last) { offload->inc = true; weight = offload->mb_last - offload->mb_first; } else { offload->inc = false; weight = offload->mb_first - offload->mb_last; } return can_rx_offload_init_queue(dev, offload, weight); } EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) { if (!offload->mailbox_read) return -EINVAL; return can_rx_offload_init_queue(dev, offload, weight); } EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo); int can_rx_offload_add_manual(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) { if (offload->mailbox_read) return -EINVAL; return can_rx_offload_init_queue(dev, offload, weight); } EXPORT_SYMBOL_GPL(can_rx_offload_add_manual); void can_rx_offload_enable(struct can_rx_offload *offload) { napi_enable(&offload->napi); } EXPORT_SYMBOL_GPL(can_rx_offload_enable); void can_rx_offload_del(struct can_rx_offload *offload) { netif_napi_del(&offload->napi); skb_queue_purge(&offload->skb_queue); __skb_queue_purge(&offload->skb_irq_queue); } EXPORT_SYMBOL_GPL(can_rx_offload_del);
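A sketch (not taken from any in-tree driver) of how a FIFO-mode CAN driver might plug into the helpers above, using only the callbacks and entry points shown in this file. struct my_priv and the my_hw_*() accessors are hypothetical stand-ins for real hardware glue; IRQ registration, ndo_open (can_rx_offload_enable()) and teardown (can_rx_offload_del()) are omitted.

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/err.h>
#include <linux/interrupt.h>

struct my_priv {
	struct can_rx_offload offload;	/* must outlive the offload helpers */
	struct net_device *ndev;
};

/* Hypothetical hardware accessors -- stand-ins, not a real API. */
bool my_hw_fifo_has_frame(struct my_priv *priv);
void my_hw_fifo_discard(struct my_priv *priv);
void my_hw_fifo_read(struct my_priv *priv, struct can_frame *cf, u32 *timestamp);

static struct sk_buff *my_mailbox_read(struct can_rx_offload *offload,
				       unsigned int n, u32 *timestamp,
				       bool drop)
{
	struct my_priv *priv = container_of(offload, struct my_priv, offload);
	struct can_frame *cf;
	struct sk_buff *skb;

	if (!my_hw_fifo_has_frame(priv))
		return NULL;			/* mailbox/FIFO empty */

	if (drop) {				/* queue full: free the mailbox anyway */
		my_hw_fifo_discard(priv);
		return ERR_PTR(-ENOBUFS);
	}

	skb = alloc_can_skb(offload->dev, &cf);
	if (!skb) {
		my_hw_fifo_discard(priv);
		return ERR_PTR(-ENOMEM);
	}

	my_hw_fifo_read(priv, cf, timestamp);	/* fill the frame (and timestamp) */
	return skb;
}

static irqreturn_t my_rx_irq(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	/* Drain the FIFO into skb_irq_queue, then splice it and kick NAPI. */
	can_rx_offload_irq_offload_fifo(&priv->offload);
	can_rx_offload_irq_finish(&priv->offload);

	return IRQ_HANDLED;
}

static int my_rx_offload_setup(struct my_priv *priv)
{
	priv->offload.mailbox_read = my_mailbox_read;

	/* The weight sizes the NAPI budget and the internal queue limit. */
	return can_rx_offload_add_fifo(priv->ndev, &priv->offload, 32);
}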
914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2001 Vojtech Pavlik * * CATC EL1210A NetMate USB Ethernet driver * * Sponsored by SuSE * * Based on the work of * Donald Becker * * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002 * - adds support for Belkin F5U011 */ /* * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <linux/uaccess.h> #undef DEBUG #include <linux/usb.h> /* * Version information. */ #define DRIVER_VERSION "v2.8" #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>" #define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver" #define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char driver_name[] = "catc"; /* * Some defines. */ #define STATS_UPDATE (HZ) /* Time between stats updates */ #define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */ #define PKT_SZ 1536 /* Max Ethernet packet size */ #define RX_MAX_BURST 15 /* Max packets per rx buffer (> 0, < 16) */ #define TX_MAX_BURST 15 /* Max full sized packets per tx buffer (> 0) */ #define CTRL_QUEUE 16 /* Max control requests in flight (power of two) */ #define RX_PKT_SZ 1600 /* Max size of receive packet for F5U011 */ /* * Control requests. */ enum control_requests { ReadMem = 0xf1, GetMac = 0xf2, Reset = 0xf4, SetMac = 0xf5, SetRxMode = 0xf5, /* F5U011 only */ WriteROM = 0xf8, SetReg = 0xfa, GetReg = 0xfb, WriteMem = 0xfc, ReadROM = 0xfd, }; /* * Registers. */ enum register_offsets { TxBufCount = 0x20, RxBufCount = 0x21, OpModes = 0x22, TxQed = 0x23, RxQed = 0x24, MaxBurst = 0x25, RxUnit = 0x60, EthStatus = 0x61, StationAddr0 = 0x67, EthStats = 0x69, LEDCtrl = 0x81, }; enum eth_stats { TxSingleColl = 0x00, TxMultiColl = 0x02, TxExcessColl = 0x04, RxFramErr = 0x06, }; enum op_mode_bits { Op3MemWaits = 0x03, OpLenInclude = 0x08, OpRxMerge = 0x10, OpTxMerge = 0x20, OpWin95bugfix = 0x40, OpLoopback = 0x80, }; enum rx_filter_bits { RxEnable = 0x01, RxPolarity = 0x02, RxForceOK = 0x04, RxMultiCast = 0x08, RxPromisc = 0x10, AltRxPromisc = 0x20, /* F5U011 uses different bit */ }; enum led_values { LEDFast = 0x01, LEDSlow = 0x02, LEDFlash = 0x03, LEDPulse = 0x04, LEDLink = 0x08, }; enum link_status { LinkNoChange = 0, LinkGood = 1, LinkBad = 2 }; /* * The catc struct. 
*/ #define CTRL_RUNNING 0 #define RX_RUNNING 1 #define TX_RUNNING 2 struct catc { struct net_device *netdev; struct usb_device *usbdev; unsigned long flags; unsigned int tx_ptr, tx_idx; unsigned int ctrl_head, ctrl_tail; spinlock_t tx_lock, ctrl_lock; u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)]; u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)]; u8 irq_buf[2]; u8 ctrl_buf[64]; struct usb_ctrlrequest ctrl_dr; struct timer_list timer; u8 stats_buf[8]; u16 stats_vals[4]; unsigned long last_stats; u8 multicast[64]; struct ctrl_queue { u8 dir; u8 request; u16 value; u16 index; void *buf; int len; void (*callback)(struct catc *catc, struct ctrl_queue *q); } ctrl_queue[CTRL_QUEUE]; struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb; u8 is_f5u011; /* Set if device is an F5U011 */ u8 rxmode[2]; /* Used for F5U011 */ atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */ }; /* * Useful macros. */ #define catc_get_mac(catc, mac) catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6) #define catc_reset(catc) catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0) #define catc_set_reg(catc, reg, val) catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0) #define catc_get_reg(catc, reg, buf) catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1) #define catc_write_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size) #define catc_read_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size) #define f5u011_rxmode(catc, rxmode) catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2) #define f5u011_rxmode_async(catc, rxmode) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL) #define f5u011_mchash_async(catc, hash) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL) #define catc_set_reg_async(catc, reg, val) catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL) #define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb) #define catc_write_mem_async(catc, addr, buf, size) catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL) /* * Receive routines. 
*/ static void catc_rx_done(struct urb *urb) { struct catc *catc = urb->context; u8 *pkt_start = urb->transfer_buffer; struct sk_buff *skb; int pkt_len, pkt_offset = 0; int status = urb->status; if (!catc->is_f5u011) { clear_bit(RX_RUNNING, &catc->flags); pkt_offset = 2; } if (status) { dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n", status, urb->actual_length); return; } do { if(!catc->is_f5u011) { pkt_len = le16_to_cpup((__le16*)pkt_start); if (pkt_len > urb->actual_length) { catc->netdev->stats.rx_length_errors++; catc->netdev->stats.rx_errors++; break; } } else { pkt_len = urb->actual_length; } if (!(skb = dev_alloc_skb(pkt_len))) return; skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, catc->netdev); netif_rx(skb); catc->netdev->stats.rx_packets++; catc->netdev->stats.rx_bytes += pkt_len; /* F5U011 only does one packet per RX */ if (catc->is_f5u011) break; pkt_start += (((pkt_len + 1) >> 6) + 1) << 6; } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length); if (catc->is_f5u011) { if (atomic_read(&catc->recq_sz)) { int state; atomic_dec(&catc->recq_sz); netdev_dbg(catc->netdev, "getting extra packet\n"); urb->dev = catc->usbdev; if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { netdev_dbg(catc->netdev, "submit(rx_urb) status %d\n", state); } } else { clear_bit(RX_RUNNING, &catc->flags); } } } static void catc_irq_done(struct urb *urb) { struct catc *catc = urb->context; u8 *data = urb->transfer_buffer; int status = urb->status; unsigned int hasdata, linksts = LinkNoChange; int res; if (!catc->is_f5u011) { hasdata = data[1] & 0x80; if (data[1] & 0x40) linksts = LinkGood; else if (data[1] & 0x20) linksts = LinkBad; } else { hasdata = (unsigned int)(be16_to_cpup((__be16*)data) & 0x0fff); if (data[0] == 0x90) linksts = LinkGood; else if (data[0] == 0xA0) linksts = LinkBad; } switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ dev_dbg(&urb->dev->dev, "irq_done, status %d, data %02x %02x.\n", status, data[0], data[1]); goto resubmit; } if (linksts == LinkGood) { netif_carrier_on(catc->netdev); netdev_dbg(catc->netdev, "link ok\n"); } if (linksts == LinkBad) { netif_carrier_off(catc->netdev); netdev_dbg(catc->netdev, "link bad\n"); } if (hasdata) { if (test_and_set_bit(RX_RUNNING, &catc->flags)) { if (catc->is_f5u011) atomic_inc(&catc->recq_sz); } else { catc->rx_urb->dev = catc->usbdev; if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) { dev_err(&catc->usbdev->dev, "submit(rx_urb) status %d\n", res); } } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res) dev_err(&catc->usbdev->dev, "can't resubmit intr, %s-%s, status %d\n", catc->usbdev->bus->bus_name, catc->usbdev->devpath, res); } /* * Transmit routines. 
*/ static int catc_tx_run(struct catc *catc) { int status; if (catc->is_f5u011) catc->tx_ptr = (catc->tx_ptr + 63) & ~63; catc->tx_urb->transfer_buffer_length = catc->tx_ptr; catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx]; catc->tx_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0) dev_err(&catc->usbdev->dev, "submit(tx_urb), status %d\n", status); catc->tx_idx = !catc->tx_idx; catc->tx_ptr = 0; netif_trans_update(catc->netdev); return status; } static void catc_tx_done(struct urb *urb) { struct catc *catc = urb->context; unsigned long flags; int r, status = urb->status; if (status == -ECONNRESET) { dev_dbg(&urb->dev->dev, "Tx Reset.\n"); urb->status = 0; netif_trans_update(catc->netdev); catc->netdev->stats.tx_errors++; clear_bit(TX_RUNNING, &catc->flags); netif_wake_queue(catc->netdev); return; } if (status) { dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n", status, urb->actual_length); return; } spin_lock_irqsave(&catc->tx_lock, flags); if (catc->tx_ptr) { r = catc_tx_run(catc); if (unlikely(r < 0)) clear_bit(TX_RUNNING, &catc->flags); } else { clear_bit(TX_RUNNING, &catc->flags); } netif_wake_queue(catc->netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); } static netdev_tx_t catc_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); unsigned long flags; int r = 0; char *tx_buf; spin_lock_irqsave(&catc->tx_lock, flags); catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; if (catc->is_f5u011) *(__be16 *)tx_buf = cpu_to_be16(skb->len); else *(__le16 *)tx_buf = cpu_to_le16(skb->len); skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); catc->tx_ptr += skb->len + 2; if (!test_and_set_bit(TX_RUNNING, &catc->flags)) { r = catc_tx_run(catc); if (r < 0) clear_bit(TX_RUNNING, &catc->flags); } if ((catc->is_f5u011 && catc->tx_ptr) || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2)))) netif_stop_queue(netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); if (r >= 0) { catc->netdev->stats.tx_bytes += skb->len; catc->netdev->stats.tx_packets++; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static void catc_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct catc *catc = netdev_priv(netdev); dev_warn(&netdev->dev, "Transmit timed out.\n"); usb_unlink_urb(catc->tx_urb); } /* * Control messages. */ static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len) { int retval = usb_control_msg(catc->usbdev, dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0), request, 0x40 | dir, value, index, buf, len, 1000); return retval < 0 ? retval : 0; } static void catc_ctrl_run(struct catc *catc) { struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail; struct usb_device *usbdev = catc->usbdev; struct urb *urb = catc->ctrl_urb; struct usb_ctrlrequest *dr = &catc->ctrl_dr; int status; dr->bRequest = q->request; dr->bRequestType = 0x40 | q->dir; dr->wValue = cpu_to_le16(q->value); dr->wIndex = cpu_to_le16(q->index); dr->wLength = cpu_to_le16(q->len); urb->pipe = q->dir ? 
usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0); urb->transfer_buffer_length = q->len; urb->transfer_buffer = catc->ctrl_buf; urb->setup_packet = (void *) dr; urb->dev = usbdev; if (!q->dir && q->buf && q->len) memcpy(catc->ctrl_buf, q->buf, q->len); if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC))) dev_err(&catc->usbdev->dev, "submit(ctrl_urb) status %d\n", status); } static void catc_ctrl_done(struct urb *urb) { struct catc *catc = urb->context; struct ctrl_queue *q; unsigned long flags; int status = urb->status; if (status) dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n", status, urb->actual_length); spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_tail; if (q->dir) { if (q->buf && q->len) memcpy(q->buf, catc->ctrl_buf, q->len); else q->buf = catc->ctrl_buf; } if (q->callback) q->callback(catc, q); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head != catc->ctrl_tail) catc_ctrl_run(catc); else clear_bit(CTRL_RUNNING, &catc->flags); spin_unlock_irqrestore(&catc->ctrl_lock, flags); } static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q)) { struct ctrl_queue *q; int retval = 0; unsigned long flags; spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_head; q->dir = dir; q->request = request; q->value = value; q->index = index; q->buf = buf; q->len = len; q->callback = callback; catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head == catc->ctrl_tail) { dev_err(&catc->usbdev->dev, "ctrl queue full\n"); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); retval = -1; } if (!test_and_set_bit(CTRL_RUNNING, &catc->flags)) catc_ctrl_run(catc); spin_unlock_irqrestore(&catc->ctrl_lock, flags); return retval; } /* * Statistics. */ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q) { int index = q->index - EthStats; u16 data, last; catc->stats_buf[index] = *((char *)q->buf); if (index & 1) return; data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1]; last = catc->stats_vals[index >> 1]; switch (index) { case TxSingleColl: case TxMultiColl: catc->netdev->stats.collisions += data - last; break; case TxExcessColl: catc->netdev->stats.tx_aborted_errors += data - last; catc->netdev->stats.tx_errors += data - last; break; case RxFramErr: catc->netdev->stats.rx_frame_errors += data - last; catc->netdev->stats.rx_errors += data - last; break; } catc->stats_vals[index >> 1] = data; } static void catc_stats_timer(struct timer_list *t) { struct catc *catc = from_timer(catc, t, timer); int i; for (i = 0; i < 8; i++) catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done); mod_timer(&catc->timer, jiffies + STATS_UPDATE); } /* * Receive modes. Broadcast, Multicast, Promisc. */ static void catc_multicast(const unsigned char *addr, u8 *multicast) { u32 crc; crc = ether_crc_le(6, addr); multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } static void catc_set_multicast_list(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); struct netdev_hw_addr *ha; u8 broadcast[ETH_ALEN]; u8 rx = RxEnable | RxPolarity | RxMultiCast; eth_broadcast_addr(broadcast); memset(catc->multicast, 0, 64); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); if (netdev->flags & IFF_PROMISC) { memset(catc->multicast, 0xff, 64); rx |= (!catc->is_f5u011) ? 
RxPromisc : AltRxPromisc; } if (netdev->flags & IFF_ALLMULTI) { memset(catc->multicast, 0xff, 64); } else { netdev_for_each_mc_addr(ha, netdev) { u32 crc = ether_crc_le(6, ha->addr); if (!catc->is_f5u011) { catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } else { catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7); } } } if (!catc->is_f5u011) { catc_set_reg_async(catc, RxUnit, rx); catc_write_mem_async(catc, 0xfa80, catc->multicast, 64); } else { f5u011_mchash_async(catc, catc->multicast); if (catc->rxmode[0] != rx) { catc->rxmode[0] = rx; netdev_dbg(catc->netdev, "Setting RX mode to %2.2X %2.2X\n", catc->rxmode[0], catc->rxmode[1]); f5u011_rxmode_async(catc, catc->rxmode); } } } static void catc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct catc *catc = netdev_priv(dev); strscpy(info->driver, driver_name, sizeof(info->driver)); strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info)); } static int catc_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct catc *catc = netdev_priv(dev); if (!catc->is_f5u011) return -EOPNOTSUPP; ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); ethtool_link_ksettings_add_link_mode(cmd, supported, TP); ethtool_link_ksettings_zero_link_mode(cmd, advertising); ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.speed = SPEED_10; cmd->base.duplex = DUPLEX_HALF; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_DISABLE; return 0; } static const struct ethtool_ops ops = { .get_drvinfo = catc_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = catc_get_link_ksettings, }; /* * Open, close. */ static int catc_open(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); int status; catc->irq_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) { dev_err(&catc->usbdev->dev, "submit(irq_urb) status %d\n", status); return -1; } netif_start_queue(netdev); if (!catc->is_f5u011) mod_timer(&catc->timer, jiffies + STATS_UPDATE); return 0; } static int catc_stop(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); netif_stop_queue(netdev); if (!catc->is_f5u011) del_timer_sync(&catc->timer); usb_kill_urb(catc->rx_urb); usb_kill_urb(catc->tx_urb); usb_kill_urb(catc->irq_urb); usb_kill_urb(catc->ctrl_urb); return 0; } static const struct net_device_ops catc_netdev_ops = { .ndo_open = catc_open, .ndo_stop = catc_stop, .ndo_start_xmit = catc_start_xmit, .ndo_tx_timeout = catc_tx_timeout, .ndo_set_rx_mode = catc_set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* * USB probe, disconnect. 
*/ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct device *dev = &intf->dev; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; u8 broadcast[ETH_ALEN]; u8 *macbuf; int pktsz, ret = -ENOMEM; macbuf = kmalloc(ETH_ALEN, GFP_KERNEL); if (!macbuf) goto error; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { dev_err(dev, "Can't set altsetting 1.\n"); ret = -EIO; goto fail_mem; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) goto fail_mem; catc = netdev_priv(netdev); netdev->netdev_ops = &catc_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; netdev->ethtool_ops = &ops; catc->usbdev = usbdev; catc->netdev = netdev; spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); timer_setup(&catc->timer, catc_stats_timer, 0); catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { dev_err(&intf->dev, "No free urbs available.\n"); ret = -ENOMEM; goto fail_free; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { dev_dbg(dev, "Testing for f5u011\n"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; } else { pktsz = RX_MAX_BURST * (PKT_SZ + 2); } usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0), NULL, NULL, 0, catc_ctrl_done, catc); usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1), NULL, 0, catc_tx_done, catc); usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1), catc->rx_buf, pktsz, catc_rx_done, catc); usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2), catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { u32 *buf; int i; dev_dbg(dev, "Checking memory size\n"); buf = kmalloc(4, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto fail_free; } *buf = 0x12345678; catc_write_mem(catc, 0x7a80, buf, 4); *buf = 0x87654321; catc_write_mem(catc, 0xfa80, buf, 4); catc_read_mem(catc, 0x7a80, buf, 4); switch (*buf) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); dev_dbg(dev, "64k Memory\n"); break; default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); fallthrough; case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); dev_dbg(dev, "32k Memory\n"); break; } kfree(buf); dev_dbg(dev, "Getting MAC from SEEROM.\n"); catc_get_mac(catc, macbuf); eth_hw_addr_set(netdev, macbuf); dev_dbg(dev, "Setting MAC into registers.\n"); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); dev_dbg(dev, "Filling the multicast list.\n"); eth_broadcast_addr(broadcast); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); dev_dbg(dev, "Clearing error counters.\n"); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; dev_dbg(dev, "Enabling.\n"); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, 
RxEnable | RxPolarity | RxMultiCast); } else { dev_dbg(dev, "Performing reset\n"); catc_reset(catc); catc_get_mac(catc, macbuf); eth_hw_addr_set(netdev, macbuf); dev_dbg(dev, "Setting RX Mode\n"); catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } dev_dbg(dev, "Init done.\n"); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); ret = register_netdev(netdev); if (ret) goto fail_clear_intfdata; kfree(macbuf); return 0; fail_clear_intfdata: usb_set_intfdata(intf, NULL); fail_free: usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); fail_mem: kfree(macbuf); error: return ret; } static void catc_disconnect(struct usb_interface *intf) { struct catc *catc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (catc) { unregister_netdev(catc->netdev); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(catc->netdev); } } /* * Module functions and tables. */ static const struct usb_device_id catc_id_table[] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ { } }; MODULE_DEVICE_TABLE(usb, catc_id_table); static struct usb_driver catc_driver = { .name = driver_name, .probe = catc_probe, .disconnect = catc_disconnect, .id_table = catc_id_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(catc_driver);
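A standalone check (plain userspace C, not part of the driver) of the alignment arithmetic in catc_start_xmit() above: each queued frame starts on a 64-byte boundary inside tx_buf, and the expression (((p - 1) >> 6) + 1) << 6 rounds p up to the next multiple of 64 (0 stays 0 through unsigned wraparound).

#include <stdio.h>

static unsigned int round_up_64(unsigned int p)
{
	/* Same expression as catc_start_xmit() uses on tx_ptr. */
	return (((p - 1) >> 6) + 1) << 6;
}

int main(void)
{
	const unsigned int tests[] = { 0, 1, 63, 64, 65, 1500 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%4u -> %4u\n", tests[i], round_up_64(tests[i]));

	return 0;
}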
1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 // SPDX-License-Identifier: GPL-2.0-only /* * PS/2 mouse driver * * Copyright (c) 1999-2002 Vojtech Pavlik * Copyright (c) 2003-2004 Dmitry Torokhov */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define psmouse_fmt(fmt) fmt #include <linux/bitops.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #include <linux/libps2.h> #include <linux/mutex.h> #include <linux/types.h> #include "psmouse.h" #include "synaptics.h" #include "logips2pp.h" #include "alps.h" #include "hgpk.h" #include "lifebook.h" #include "trackpoint.h" #include "touchkit_ps2.h" #include "elantech.h" #include "sentelic.h" #include "cypress_ps2.h" #include "focaltech.h" #include "vmmouse.h" #include "byd.h" #define DRIVER_DESC "PS/2 mouse driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static unsigned int psmouse_max_proto = PSMOUSE_AUTO; static int psmouse_set_maxproto(const char *val, const struct kernel_param *); static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp); static const struct kernel_param_ops param_ops_proto_abbrev = { .set = psmouse_set_maxproto, .get = psmouse_get_maxproto, }; #define param_check_proto_abbrev(name, p) __param_check(name, p, unsigned int) module_param_named(proto, 
psmouse_max_proto, proto_abbrev, 0644); MODULE_PARM_DESC(proto, "Highest protocol extension to probe (bare, imps, exps, any). Useful for KVM switches."); static unsigned int psmouse_resolution = 200; module_param_named(resolution, psmouse_resolution, uint, 0644); MODULE_PARM_DESC(resolution, "Resolution, in dpi."); static unsigned int psmouse_rate = 100; module_param_named(rate, psmouse_rate, uint, 0644); MODULE_PARM_DESC(rate, "Report rate, in reports per second."); static bool psmouse_smartscroll = true; module_param_named(smartscroll, psmouse_smartscroll, bool, 0644); MODULE_PARM_DESC(smartscroll, "Logitech Smartscroll autorepeat, 1 = enabled (default), 0 = disabled."); static bool psmouse_a4tech_2wheels; module_param_named(a4tech_workaround, psmouse_a4tech_2wheels, bool, 0644); MODULE_PARM_DESC(a4tech_workaround, "A4Tech second scroll wheel workaround, 1 = enabled, 0 = disabled (default)."); static unsigned int psmouse_resetafter = 5; module_param_named(resetafter, psmouse_resetafter, uint, 0644); MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never)."); static unsigned int psmouse_resync_time; module_param_named(resync_time, psmouse_resync_time, uint, 0644); MODULE_PARM_DESC(resync_time, "How long can mouse stay idle before forcing resync (in seconds, 0 = never)."); PSMOUSE_DEFINE_ATTR(protocol, S_IWUSR | S_IRUGO, NULL, psmouse_attr_show_protocol, psmouse_attr_set_protocol); PSMOUSE_DEFINE_ATTR(rate, S_IWUSR | S_IRUGO, (void *) offsetof(struct psmouse, rate), psmouse_show_int_attr, psmouse_attr_set_rate); PSMOUSE_DEFINE_ATTR(resolution, S_IWUSR | S_IRUGO, (void *) offsetof(struct psmouse, resolution), psmouse_show_int_attr, psmouse_attr_set_resolution); PSMOUSE_DEFINE_ATTR(resetafter, S_IWUSR | S_IRUGO, (void *) offsetof(struct psmouse, resetafter), psmouse_show_int_attr, psmouse_set_int_attr); PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO, (void *) offsetof(struct psmouse, resync_time), psmouse_show_int_attr, psmouse_set_int_attr); static struct attribute *psmouse_dev_attrs[] = { &psmouse_attr_protocol.dattr.attr, &psmouse_attr_rate.dattr.attr, &psmouse_attr_resolution.dattr.attr, &psmouse_attr_resetafter.dattr.attr, &psmouse_attr_resync_time.dattr.attr, NULL }; ATTRIBUTE_GROUPS(psmouse_dev); /* * psmouse_mutex protects all operations changing state of mouse * (connecting, disconnecting, changing rate or resolution via * sysfs). We could use a per-device semaphore but since there * rarely more than one PS/2 mouse connected and since semaphore * is taken in "slow" paths it is not worth it. */ static DEFINE_MUTEX(psmouse_mutex); static struct workqueue_struct *kpsmoused_wq; struct psmouse *psmouse_from_serio(struct serio *serio) { struct ps2dev *ps2dev = serio_get_drvdata(serio); return container_of(ps2dev, struct psmouse, ps2dev); } void psmouse_report_standard_buttons(struct input_dev *dev, u8 buttons) { input_report_key(dev, BTN_LEFT, buttons & BIT(0)); input_report_key(dev, BTN_MIDDLE, buttons & BIT(2)); input_report_key(dev, BTN_RIGHT, buttons & BIT(1)); } void psmouse_report_standard_motion(struct input_dev *dev, u8 *packet) { int x, y; x = packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0; y = packet[2] ? 
packet[2] - ((packet[0] << 3) & 0x100) : 0; input_report_rel(dev, REL_X, x); input_report_rel(dev, REL_Y, -y); } void psmouse_report_standard_packet(struct input_dev *dev, u8 *packet) { psmouse_report_standard_buttons(dev, packet[0]); psmouse_report_standard_motion(dev, packet); } /* * psmouse_process_byte() analyzes the PS/2 data stream and reports * relevant events to the input module once full packet has arrived. */ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse) { struct input_dev *dev = psmouse->dev; u8 *packet = psmouse->packet; int wheel; if (psmouse->pktcnt < psmouse->pktsize) return PSMOUSE_GOOD_DATA; /* Full packet accumulated, process it */ switch (psmouse->protocol->type) { case PSMOUSE_IMPS: /* IntelliMouse has scroll wheel */ input_report_rel(dev, REL_WHEEL, -(s8) packet[3]); break; case PSMOUSE_IMEX: /* Scroll wheel and buttons on IntelliMouse Explorer */ switch (packet[3] & 0xC0) { case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */ input_report_rel(dev, REL_WHEEL, -sign_extend32(packet[3], 5)); break; case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */ input_report_rel(dev, REL_HWHEEL, -sign_extend32(packet[3], 5)); break; case 0x00: case 0xC0: wheel = sign_extend32(packet[3], 3); /* * Some A4Tech mice have two scroll wheels, with first * one reporting +/-1 in the lower nibble, and second * one reporting +/-2. */ if (psmouse_a4tech_2wheels && abs(wheel) > 1) input_report_rel(dev, REL_HWHEEL, wheel / 2); else input_report_rel(dev, REL_WHEEL, -wheel); input_report_key(dev, BTN_SIDE, packet[3] & BIT(4)); input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5)); break; } break; case PSMOUSE_GENPS: /* Report scroll buttons on NetMice */ input_report_rel(dev, REL_WHEEL, -(s8) packet[3]); /* Extra buttons on Genius NewNet 3D */ input_report_key(dev, BTN_SIDE, packet[0] & BIT(6)); input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7)); break; case PSMOUSE_THINKPS: /* Extra button on ThinkingMouse */ input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3)); /* * Without this bit of weirdness moving up gives wildly * high Y changes. */ packet[1] |= (packet[0] & 0x40) << 1; break; case PSMOUSE_CORTRON: /* * Cortron PS2 Trackball reports SIDE button in the * 4th bit of the first byte. */ input_report_key(dev, BTN_SIDE, packet[0] & BIT(3)); packet[0] |= BIT(3); break; default: break; } /* Generic PS/2 Mouse */ packet[0] |= psmouse->extra_buttons; psmouse_report_standard_packet(dev, packet); input_sync(dev); return PSMOUSE_FULL_PACKET; } void psmouse_queue_work(struct psmouse *psmouse, struct delayed_work *work, unsigned long delay) { queue_delayed_work(kpsmoused_wq, work, delay); } /* * __psmouse_set_state() sets new psmouse state and resets all flags. */ static inline void __psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state) { psmouse->state = new_state; psmouse->pktcnt = psmouse->out_of_sync_cnt = 0; psmouse->ps2dev.flags = 0; psmouse->last = jiffies; } /* * psmouse_set_state() sets new psmouse state and resets all flags and * counters while holding serio lock so fighting with interrupt handler * is not a concern. */ void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state) { serio_pause_rx(psmouse->ps2dev.serio); __psmouse_set_state(psmouse, new_state); serio_continue_rx(psmouse->ps2dev.serio); } /* * psmouse_handle_byte() processes one byte of the input data stream * by calling corresponding protocol handler. 
*/ static int psmouse_handle_byte(struct psmouse *psmouse) { psmouse_ret_t rc = psmouse->protocol_handler(psmouse); switch (rc) { case PSMOUSE_BAD_DATA: if (psmouse->state == PSMOUSE_ACTIVATED) { psmouse_warn(psmouse, "%s at %s lost sync at byte %d\n", psmouse->name, psmouse->phys, psmouse->pktcnt); if (++psmouse->out_of_sync_cnt == psmouse->resetafter) { __psmouse_set_state(psmouse, PSMOUSE_IGNORE); psmouse_notice(psmouse, "issuing reconnect request\n"); serio_reconnect(psmouse->ps2dev.serio); return -EIO; } } psmouse->pktcnt = 0; break; case PSMOUSE_FULL_PACKET: psmouse->pktcnt = 0; if (psmouse->out_of_sync_cnt) { psmouse->out_of_sync_cnt = 0; psmouse_notice(psmouse, "%s at %s - driver resynced.\n", psmouse->name, psmouse->phys); } break; case PSMOUSE_GOOD_DATA: break; } return 0; } static void psmouse_handle_oob_data(struct psmouse *psmouse, u8 data) { switch (psmouse->oob_data_type) { case PSMOUSE_OOB_NONE: psmouse->oob_data_type = data; break; case PSMOUSE_OOB_EXTRA_BTNS: psmouse_report_standard_buttons(psmouse->dev, data); input_sync(psmouse->dev); psmouse->extra_buttons = data; psmouse->oob_data_type = PSMOUSE_OOB_NONE; break; default: psmouse_warn(psmouse, "unknown OOB_DATA type: 0x%02x\n", psmouse->oob_data_type); psmouse->oob_data_type = PSMOUSE_OOB_NONE; break; } } static enum ps2_disposition psmouse_pre_receive_byte(struct ps2dev *ps2dev, u8 data, unsigned int flags) { struct psmouse *psmouse = container_of(ps2dev, struct psmouse, ps2dev); if (psmouse->state == PSMOUSE_IGNORE) return PS2_IGNORE; if (unlikely((flags & SERIO_TIMEOUT) || ((flags & SERIO_PARITY) && !psmouse->protocol->ignore_parity))) { if (psmouse->state == PSMOUSE_ACTIVATED) psmouse_warn(psmouse, "bad data from KBC -%s%s\n", flags & SERIO_TIMEOUT ? " timeout" : "", flags & SERIO_PARITY ? " bad parity" : ""); return PS2_ERROR; } if (flags & SERIO_OOB_DATA) { psmouse_handle_oob_data(psmouse, data); return PS2_IGNORE; } return PS2_PROCESS; } static void psmouse_receive_byte(struct ps2dev *ps2dev, u8 data) { struct psmouse *psmouse = container_of(ps2dev, struct psmouse, ps2dev); pm_wakeup_event(&ps2dev->serio->dev, 0); if (psmouse->state <= PSMOUSE_RESYNCING) return; if (psmouse->state == PSMOUSE_ACTIVATED && psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) { psmouse_info(psmouse, "%s at %s lost synchronization, throwing %d bytes away.\n", psmouse->name, psmouse->phys, psmouse->pktcnt); psmouse->badbyte = psmouse->packet[0]; __psmouse_set_state(psmouse, PSMOUSE_RESYNCING); psmouse_queue_work(psmouse, &psmouse->resync_work, 0); return; } psmouse->packet[psmouse->pktcnt++] = data; /* Check if this is a new device announcement (0xAA 0x00) */ if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) { if (psmouse->pktcnt == 1) { psmouse->last = jiffies; return; } if (psmouse->packet[1] == PSMOUSE_RET_ID || (psmouse->protocol->type == PSMOUSE_HGPK && psmouse->packet[1] == PSMOUSE_RET_BAT)) { __psmouse_set_state(psmouse, PSMOUSE_IGNORE); serio_reconnect(ps2dev->serio); return; } /* Not a new device, try processing first byte normally */ psmouse->pktcnt = 1; if (psmouse_handle_byte(psmouse)) return; psmouse->packet[psmouse->pktcnt++] = data; } /* * See if we need to force resync because mouse was idle for * too long. 
*/ if (psmouse->state == PSMOUSE_ACTIVATED && psmouse->pktcnt == 1 && psmouse->resync_time && time_after(jiffies, psmouse->last + psmouse->resync_time * HZ)) { psmouse->badbyte = psmouse->packet[0]; __psmouse_set_state(psmouse, PSMOUSE_RESYNCING); psmouse_queue_work(psmouse, &psmouse->resync_work, 0); return; } psmouse->last = jiffies; psmouse_handle_byte(psmouse); } /* * psmouse_reset() resets the mouse into power-on state. */ int psmouse_reset(struct psmouse *psmouse) { u8 param[2]; int error; error = ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_RESET_BAT); if (error) return error; if (param[0] != PSMOUSE_RET_BAT && param[1] != PSMOUSE_RET_ID) return -EIO; return 0; } /* * Here we set the mouse resolution. */ void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution) { static const u8 params[] = { 0, 1, 2, 2, 3 }; u8 p; if (resolution == 0 || resolution > 200) resolution = 200; p = params[resolution / 50]; ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES); psmouse->resolution = 25 << p; } /* * Here we set the mouse report rate. */ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) { static const u8 rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 }; u8 r; int i = 0; while (rates[i] > rate) i++; r = rates[i]; ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE); psmouse->rate = r; } /* * Here we set the mouse scaling. */ static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale) { ps2_command(&psmouse->ps2dev, NULL, scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11); } /* * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. */ static int psmouse_poll(struct psmouse *psmouse) { return ps2_command(&psmouse->ps2dev, psmouse->packet, PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)); } static bool psmouse_check_pnp_id(const char *id, const char * const ids[]) { int i; for (i = 0; ids[i]; i++) if (!strcasecmp(id, ids[i])) return true; return false; } /* * psmouse_matches_pnp_id - check if psmouse matches one of the passed in ids. */ bool psmouse_matches_pnp_id(struct psmouse *psmouse, const char * const ids[]) { struct serio *serio = psmouse->ps2dev.serio; char *p, *fw_id_copy, *save_ptr; bool found = false; if (strncmp(serio->firmware_id, "PNP: ", 5)) return false; fw_id_copy = kstrndup(&serio->firmware_id[5], sizeof(serio->firmware_id) - 5, GFP_KERNEL); if (!fw_id_copy) return false; save_ptr = fw_id_copy; while ((p = strsep(&fw_id_copy, " ")) != NULL) { if (psmouse_check_pnp_id(p, ids)) { found = true; break; } } kfree(save_ptr); return found; } /* * Genius NetMouse magic init. */ static int genius_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; u8 param[4]; param[0] = 3; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO); if (param[0] != 0x00 || param[1] != 0x33 || param[2] != 0x55) return -ENODEV; if (set_properties) { __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(BTN_EXTRA, psmouse->dev->keybit); __set_bit(BTN_SIDE, psmouse->dev->keybit); __set_bit(REL_WHEEL, psmouse->dev->relbit); psmouse->vendor = "Genius"; psmouse->name = "Mouse"; psmouse->pktsize = 4; } return 0; } /* * IntelliMouse magic init. 
*/ static int intellimouse_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; u8 param[2]; param[0] = 200; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 100; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 80; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); ps2_command(ps2dev, param, PSMOUSE_CMD_GETID); if (param[0] != 3) return -ENODEV; if (set_properties) { __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(REL_WHEEL, psmouse->dev->relbit); if (!psmouse->vendor) psmouse->vendor = "Generic"; if (!psmouse->name) psmouse->name = "Wheel Mouse"; psmouse->pktsize = 4; } return 0; } /* * Try IntelliMouse/Explorer magic init. */ static int im_explorer_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; u8 param[2]; intellimouse_detect(psmouse, 0); param[0] = 200; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 200; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 80; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); ps2_command(ps2dev, param, PSMOUSE_CMD_GETID); if (param[0] != 4) return -ENODEV; /* Magic to enable horizontal scrolling on IntelliMouse 4.0 */ param[0] = 200; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 80; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 40; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); if (set_properties) { __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(REL_WHEEL, psmouse->dev->relbit); __set_bit(REL_HWHEEL, psmouse->dev->relbit); __set_bit(BTN_SIDE, psmouse->dev->keybit); __set_bit(BTN_EXTRA, psmouse->dev->keybit); if (!psmouse->vendor) psmouse->vendor = "Generic"; if (!psmouse->name) psmouse->name = "Explorer Mouse"; psmouse->pktsize = 4; } return 0; } /* * Kensington ThinkingMouse / ExpertMouse magic init. */ static int thinking_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; u8 param[2]; static const u8 seq[] = { 20, 60, 40, 20, 20, 60, 40, 20, 20 }; int i; param[0] = 10; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); param[0] = 0; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); for (i = 0; i < ARRAY_SIZE(seq); i++) { param[0] = seq[i]; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); } ps2_command(ps2dev, param, PSMOUSE_CMD_GETID); if (param[0] != 2) return -ENODEV; if (set_properties) { __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(BTN_EXTRA, psmouse->dev->keybit); psmouse->vendor = "Kensington"; psmouse->name = "ThinkingMouse"; } return 0; } /* * Bare PS/2 protocol "detection". Always succeeds. */ static int ps2bare_detect(struct psmouse *psmouse, bool set_properties) { if (set_properties) { if (!psmouse->vendor) psmouse->vendor = "Generic"; if (!psmouse->name) psmouse->name = "Mouse"; /* * We have no way of figuring true number of buttons so let's * assume that the device has 3. */ input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE); } return 0; } /* * Cortron PS/2 protocol detection. There's no special way to detect it, so it * must be forced by sysfs protocol writing. 
*/ static int cortron_detect(struct psmouse *psmouse, bool set_properties) { if (set_properties) { psmouse->vendor = "Cortron"; psmouse->name = "PS/2 Trackball"; __set_bit(BTN_MIDDLE, psmouse->dev->keybit); __set_bit(BTN_SIDE, psmouse->dev->keybit); } return 0; } static const struct psmouse_protocol psmouse_protocols[] = { { .type = PSMOUSE_PS2, .name = "PS/2", .alias = "bare", .maxproto = true, .ignore_parity = true, .detect = ps2bare_detect, .try_passthru = true, }, #ifdef CONFIG_MOUSE_PS2_LOGIPS2PP { .type = PSMOUSE_PS2PP, .name = "PS2++", .alias = "logitech", .detect = ps2pp_detect, }, #endif { .type = PSMOUSE_THINKPS, .name = "ThinkPS/2", .alias = "thinkps", .detect = thinking_detect, }, #ifdef CONFIG_MOUSE_PS2_CYPRESS { .type = PSMOUSE_CYPRESS, .name = "CyPS/2", .alias = "cypress", .detect = cypress_detect, .init = cypress_init, }, #endif { .type = PSMOUSE_GENPS, .name = "GenPS/2", .alias = "genius", .detect = genius_detect, }, { .type = PSMOUSE_IMPS, .name = "ImPS/2", .alias = "imps", .maxproto = true, .ignore_parity = true, .detect = intellimouse_detect, .try_passthru = true, }, { .type = PSMOUSE_IMEX, .name = "ImExPS/2", .alias = "exps", .maxproto = true, .ignore_parity = true, .detect = im_explorer_detect, .try_passthru = true, }, #ifdef CONFIG_MOUSE_PS2_SYNAPTICS { .type = PSMOUSE_SYNAPTICS, .name = "SynPS/2", .alias = "synaptics", .detect = synaptics_detect, .init = synaptics_init_absolute, }, { .type = PSMOUSE_SYNAPTICS_RELATIVE, .name = "SynRelPS/2", .alias = "synaptics-relative", .detect = synaptics_detect, .init = synaptics_init_relative, }, #endif #ifdef CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS { .type = PSMOUSE_SYNAPTICS_SMBUS, .name = "SynSMBus", .alias = "synaptics-smbus", .detect = synaptics_detect, .init = synaptics_init_smbus, .smbus_companion = true, }, #endif #ifdef CONFIG_MOUSE_PS2_ALPS { .type = PSMOUSE_ALPS, .name = "AlpsPS/2", .alias = "alps", .detect = alps_detect, .init = alps_init, }, #endif #ifdef CONFIG_MOUSE_PS2_LIFEBOOK { .type = PSMOUSE_LIFEBOOK, .name = "LBPS/2", .alias = "lifebook", .detect = lifebook_detect, .init = lifebook_init, }, #endif #ifdef CONFIG_MOUSE_PS2_TRACKPOINT { .type = PSMOUSE_TRACKPOINT, .name = "TPPS/2", .alias = "trackpoint", .detect = trackpoint_detect, .try_passthru = true, }, #endif #ifdef CONFIG_MOUSE_PS2_TOUCHKIT { .type = PSMOUSE_TOUCHKIT_PS2, .name = "touchkitPS/2", .alias = "touchkit", .detect = touchkit_ps2_detect, }, #endif #ifdef CONFIG_MOUSE_PS2_OLPC { .type = PSMOUSE_HGPK, .name = "OLPC HGPK", .alias = "hgpk", .detect = hgpk_detect, }, #endif #ifdef CONFIG_MOUSE_PS2_ELANTECH { .type = PSMOUSE_ELANTECH, .name = "ETPS/2", .alias = "elantech", .detect = elantech_detect, .init = elantech_init_ps2, }, #endif #ifdef CONFIG_MOUSE_PS2_ELANTECH_SMBUS { .type = PSMOUSE_ELANTECH_SMBUS, .name = "ETSMBus", .alias = "elantech-smbus", .detect = elantech_detect, .init = elantech_init_smbus, .smbus_companion = true, }, #endif #ifdef CONFIG_MOUSE_PS2_SENTELIC { .type = PSMOUSE_FSP, .name = "FSPPS/2", .alias = "fsp", .detect = fsp_detect, .init = fsp_init, }, #endif { .type = PSMOUSE_CORTRON, .name = "CortronPS/2", .alias = "cortps", .detect = cortron_detect, }, #ifdef CONFIG_MOUSE_PS2_FOCALTECH { .type = PSMOUSE_FOCALTECH, .name = "FocalTechPS/2", .alias = "focaltech", .detect = focaltech_detect, .init = focaltech_init, }, #endif #ifdef CONFIG_MOUSE_PS2_VMMOUSE { .type = PSMOUSE_VMMOUSE, .name = VMMOUSE_PSNAME, .alias = "vmmouse", .detect = vmmouse_detect, .init = vmmouse_init, }, #endif #ifdef CONFIG_MOUSE_PS2_BYD { .type = PSMOUSE_BYD, .name 
= "BYDPS/2", .alias = "byd", .detect = byd_detect, .init = byd_init, }, #endif { .type = PSMOUSE_AUTO, .name = "auto", .alias = "any", .maxproto = true, }, }; static const struct psmouse_protocol *__psmouse_protocol_by_type(enum psmouse_type type) { int i; for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++) if (psmouse_protocols[i].type == type) return &psmouse_protocols[i]; return NULL; } static const struct psmouse_protocol *psmouse_protocol_by_type(enum psmouse_type type) { const struct psmouse_protocol *proto; proto = __psmouse_protocol_by_type(type); if (proto) return proto; WARN_ON(1); return &psmouse_protocols[0]; } static const struct psmouse_protocol *psmouse_protocol_by_name(const char *name, size_t len) { const struct psmouse_protocol *p; int i; for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++) { p = &psmouse_protocols[i]; if ((strlen(p->name) == len && !strncmp(p->name, name, len)) || (strlen(p->alias) == len && !strncmp(p->alias, name, len))) return &psmouse_protocols[i]; } return NULL; } /* * Apply default settings to the psmouse structure. Most of them will * be overridden by individual protocol initialization routines. */ static void psmouse_apply_defaults(struct psmouse *psmouse) { struct input_dev *input_dev = psmouse->dev; bitmap_zero(input_dev->evbit, EV_CNT); bitmap_zero(input_dev->keybit, KEY_CNT); bitmap_zero(input_dev->relbit, REL_CNT); bitmap_zero(input_dev->absbit, ABS_CNT); bitmap_zero(input_dev->mscbit, MSC_CNT); input_set_capability(input_dev, EV_KEY, BTN_LEFT); input_set_capability(input_dev, EV_KEY, BTN_RIGHT); input_set_capability(input_dev, EV_REL, REL_X); input_set_capability(input_dev, EV_REL, REL_Y); __set_bit(INPUT_PROP_POINTER, input_dev->propbit); psmouse->protocol = &psmouse_protocols[0]; psmouse->set_rate = psmouse_set_rate; psmouse->set_resolution = psmouse_set_resolution; psmouse->set_scale = psmouse_set_scale; psmouse->poll = psmouse_poll; psmouse->protocol_handler = psmouse_process_byte; psmouse->pktsize = 3; psmouse->reconnect = NULL; psmouse->fast_reconnect = NULL; psmouse->disconnect = NULL; psmouse->cleanup = NULL; psmouse->pt_activate = NULL; psmouse->pt_deactivate = NULL; } static bool psmouse_do_detect(int (*detect)(struct psmouse *, bool), struct psmouse *psmouse, bool allow_passthrough, bool set_properties) { if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU && !allow_passthrough) { return false; } if (set_properties) psmouse_apply_defaults(psmouse); return detect(psmouse, set_properties) == 0; } static bool psmouse_try_protocol(struct psmouse *psmouse, enum psmouse_type type, unsigned int *max_proto, bool set_properties, bool init_allowed) { const struct psmouse_protocol *proto; proto = __psmouse_protocol_by_type(type); if (!proto) return false; if (!psmouse_do_detect(proto->detect, psmouse, proto->try_passthru, set_properties)) return false; if (set_properties && proto->init && init_allowed) { if (proto->init(psmouse) != 0) { /* * We detected device, but init failed. Adjust * max_proto so we only try standard protocols. */ if (*max_proto > PSMOUSE_IMEX) *max_proto = PSMOUSE_IMEX; return false; } } return true; } /* * psmouse_extensions() probes for any extensions to the basic PS/2 protocol * the mouse may have. */ static int psmouse_extensions(struct psmouse *psmouse, unsigned int max_proto, bool set_properties) { bool synaptics_hardware = false; int ret; /* * Always check for focaltech, this is safe as it uses pnp-id * matching. 
*/ if (psmouse_do_detect(focaltech_detect, psmouse, false, set_properties)) { if (max_proto > PSMOUSE_IMEX && IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH) && (!set_properties || focaltech_init(psmouse) == 0)) { return PSMOUSE_FOCALTECH; } /* * Restrict psmouse_max_proto so that psmouse_initialize() * does not try to reset rate and resolution, because even * that upsets the device. * This also causes us to basically fall through to basic * protocol detection, where we fully reset the mouse, * and set it up as bare PS/2 protocol device. */ psmouse_max_proto = max_proto = PSMOUSE_PS2; } /* * We always check for LifeBook because it does not disturb mouse * (it only checks DMI information). */ if (psmouse_try_protocol(psmouse, PSMOUSE_LIFEBOOK, &max_proto, set_properties, max_proto > PSMOUSE_IMEX)) return PSMOUSE_LIFEBOOK; if (psmouse_try_protocol(psmouse, PSMOUSE_VMMOUSE, &max_proto, set_properties, max_proto > PSMOUSE_IMEX)) return PSMOUSE_VMMOUSE; /* * Try Kensington ThinkingMouse (we try first, because Synaptics * probe upsets the ThinkingMouse). */ if (max_proto > PSMOUSE_IMEX && psmouse_try_protocol(psmouse, PSMOUSE_THINKPS, &max_proto, set_properties, true)) { return PSMOUSE_THINKPS; } /* * Try Synaptics TouchPad. Note that probing is done even if * Synaptics protocol support is disabled in config - we need to * know if it is Synaptics so we can reset it properly after * probing for IntelliMouse. */ if (max_proto > PSMOUSE_PS2 && psmouse_do_detect(synaptics_detect, psmouse, false, set_properties)) { synaptics_hardware = true; if (max_proto > PSMOUSE_IMEX) { /* * Try activating protocol, but check if support is * enabled first, since we try detecting Synaptics * even when protocol is disabled. */ if (IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS) || IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS)) { if (!set_properties) return PSMOUSE_SYNAPTICS; ret = synaptics_init(psmouse); if (ret >= 0) return ret; } /* * Some Synaptics touchpads can emulate extended * protocols (like IMPS/2). Unfortunately * Logitech/Genius probes confuse some firmware * versions so we'll have to skip them. */ max_proto = PSMOUSE_IMEX; } /* * Make sure that touchpad is in relative mode, gestures * (taps) are enabled. */ synaptics_reset(psmouse); } /* * Try Cypress Trackpad. We must try it before Finger Sensing Pad * because Finger Sensing Pad probe upsets some modules of Cypress * Trackpads. 
*/ if (max_proto > PSMOUSE_IMEX && psmouse_try_protocol(psmouse, PSMOUSE_CYPRESS, &max_proto, set_properties, true)) { return PSMOUSE_CYPRESS; } /* Try ALPS TouchPad */ if (max_proto > PSMOUSE_IMEX) { ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS); if (psmouse_try_protocol(psmouse, PSMOUSE_ALPS, &max_proto, set_properties, true)) return PSMOUSE_ALPS; } /* Try OLPC HGPK touchpad */ if (max_proto > PSMOUSE_IMEX && psmouse_try_protocol(psmouse, PSMOUSE_HGPK, &max_proto, set_properties, true)) { return PSMOUSE_HGPK; } /* Try Elantech touchpad */ if (max_proto > PSMOUSE_IMEX && psmouse_try_protocol(psmouse, PSMOUSE_ELANTECH, &max_proto, set_properties, false)) { if (!set_properties) return PSMOUSE_ELANTECH; ret = elantech_init(psmouse); if (ret >= 0) return ret; } if (max_proto > PSMOUSE_IMEX) { if (psmouse_try_protocol(psmouse, PSMOUSE_GENPS, &max_proto, set_properties, true)) return PSMOUSE_GENPS; if (psmouse_try_protocol(psmouse, PSMOUSE_PS2PP, &max_proto, set_properties, true)) return PSMOUSE_PS2PP; if (psmouse_try_protocol(psmouse, PSMOUSE_TRACKPOINT, &max_proto, set_properties, true)) return PSMOUSE_TRACKPOINT; if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2, &max_proto, set_properties, true)) return PSMOUSE_TOUCHKIT_PS2; } /* * Try Finger Sensing Pad. We do it here because its probe upsets * Trackpoint devices (causing TP_READ_ID command to time out). */ if (max_proto > PSMOUSE_IMEX && psmouse_try_protocol(psmouse, PSMOUSE_FSP, &max_proto, set_properties, true)) { return PSMOUSE_FSP; } /* * Reset to defaults in case the device got confused by extended * protocol probes. Note that we follow up with full reset because * some mice put themselves to sleep when they see PSMOUSE_RESET_DIS. */ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS); psmouse_reset(psmouse); if (max_proto >= PSMOUSE_IMEX && psmouse_try_protocol(psmouse, PSMOUSE_IMEX, &max_proto, set_properties, true)) { return PSMOUSE_IMEX; } if (max_proto >= PSMOUSE_IMPS && psmouse_try_protocol(psmouse, PSMOUSE_IMPS, &max_proto, set_properties, true)) { return PSMOUSE_IMPS; } /* * Okay, all failed, we have a standard mouse here. The number of * the buttons is still a question, though. We assume 3. */ psmouse_try_protocol(psmouse, PSMOUSE_PS2, &max_proto, set_properties, true); if (synaptics_hardware) { /* * We detected Synaptics hardware but it did not respond to * IMPS/2 probes. We need to reset the touchpad because if * there is a track point on the pass through port it could * get disabled while probing for protocol extensions. */ psmouse_reset(psmouse); } return PSMOUSE_PS2; } /* * psmouse_probe() probes for a PS/2 mouse. */ static int psmouse_probe(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; u8 param[2]; int error; /* * First, we check if it's a mouse. It should send 0x00 or 0x03 in * case of an IntelliMouse in 4-byte mode or 0x04 for IM Explorer. * Sunrex K8561 IR Keyboard/Mouse reports 0xff on second and * subsequent ID queries, probably due to a firmware bug. */ param[0] = 0xa5; error = ps2_command(ps2dev, param, PSMOUSE_CMD_GETID); if (error) return error; if (param[0] != 0x00 && param[0] != 0x03 && param[0] != 0x04 && param[0] != 0xff) return -ENODEV; /* * Then we reset and disable the mouse so that it doesn't generate * events. */ error = ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_DIS); if (error) psmouse_warn(psmouse, "Failed to reset mouse on %s: %d\n", ps2dev->serio->phys, error); return 0; } /* * psmouse_initialize() initializes the mouse to a sane state. 
*/ static void psmouse_initialize(struct psmouse *psmouse) { /* * We set the mouse report rate, resolution and scaling. */ if (psmouse_max_proto != PSMOUSE_PS2) { psmouse->set_rate(psmouse, psmouse->rate); psmouse->set_resolution(psmouse, psmouse->resolution); psmouse->set_scale(psmouse, PSMOUSE_SCALE11); } } /* * psmouse_activate() enables the mouse so that we get motion reports from it. */ int psmouse_activate(struct psmouse *psmouse) { if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) { psmouse_warn(psmouse, "Failed to enable mouse on %s\n", psmouse->ps2dev.serio->phys); return -1; } psmouse_set_state(psmouse, PSMOUSE_ACTIVATED); return 0; } /* * psmouse_deactivate() puts the mouse into poll mode so that we don't get * motion reports from it unless we explicitly request it. */ int psmouse_deactivate(struct psmouse *psmouse) { int error; error = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE); if (error) { psmouse_warn(psmouse, "Failed to deactivate mouse on %s: %d\n", psmouse->ps2dev.serio->phys, error); return error; } psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); return 0; } /* * psmouse_resync() attempts to re-validate current protocol. */ static void psmouse_resync(struct work_struct *work) { struct psmouse *parent = NULL, *psmouse = container_of(work, struct psmouse, resync_work.work); struct serio *serio = psmouse->ps2dev.serio; psmouse_ret_t rc = PSMOUSE_GOOD_DATA; bool failed = false, enabled = false; int i; mutex_lock(&psmouse_mutex); if (psmouse->state != PSMOUSE_RESYNCING) goto out; if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = psmouse_from_serio(serio->parent); psmouse_deactivate(parent); } /* * Some mice don't ACK commands sent while they are in the middle of * transmitting motion packet. To avoid delay we use ps2_sendbyte() * instead of ps2_command() which would wait for 200ms for an ACK * that may never come. * As an additional quirk ALPS touchpads may not only forget to ACK * disable command but will stop reporting taps, so if we see that * mouse at least once ACKs disable we will do full reconnect if ACK * is missing. */ psmouse->num_resyncs++; if (ps2_sendbyte(&psmouse->ps2dev, PSMOUSE_CMD_DISABLE, 20)) { if (psmouse->num_resyncs < 3 || psmouse->acks_disable_command) failed = true; } else psmouse->acks_disable_command = true; /* * Poll the mouse. If it was reset the packet will be shorter than * psmouse->pktsize and ps2_command will fail. We do not expect and * do not handle scenario when mouse "upgrades" its protocol while * disconnected since it would require additional delay. If we ever * see a mouse that does it we'll adjust the code. */ if (!failed) { if (psmouse->poll(psmouse)) failed = true; else { psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); for (i = 0; i < psmouse->pktsize; i++) { psmouse->pktcnt++; rc = psmouse->protocol_handler(psmouse); if (rc != PSMOUSE_GOOD_DATA) break; } if (rc != PSMOUSE_FULL_PACKET) failed = true; psmouse_set_state(psmouse, PSMOUSE_RESYNCING); } } /* * Now try to enable mouse. We try to do that even if poll failed * and also repeat our attempts 5 times, otherwise we may be left * out with disabled mouse. 
*/ for (i = 0; i < 5; i++) { if (!ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) { enabled = true; break; } msleep(200); } if (!enabled) { psmouse_warn(psmouse, "failed to re-enable mouse on %s\n", psmouse->ps2dev.serio->phys); failed = true; } if (failed) { psmouse_set_state(psmouse, PSMOUSE_IGNORE); psmouse_info(psmouse, "resync failed, issuing reconnect request\n"); serio_reconnect(serio); } else psmouse_set_state(psmouse, PSMOUSE_ACTIVATED); if (parent) psmouse_activate(parent); out: mutex_unlock(&psmouse_mutex); } /* * psmouse_cleanup() resets the mouse into power-on state. */ static void psmouse_cleanup(struct serio *serio) { struct psmouse *psmouse = psmouse_from_serio(serio); struct psmouse *parent = NULL; mutex_lock(&psmouse_mutex); if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = psmouse_from_serio(serio->parent); psmouse_deactivate(parent); } psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); /* * Disable stream mode so cleanup routine can proceed undisturbed. */ if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE)) psmouse_warn(psmouse, "Failed to disable mouse on %s\n", psmouse->ps2dev.serio->phys); if (psmouse->cleanup) psmouse->cleanup(psmouse); /* * Reset the mouse to defaults (bare PS/2 protocol). */ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS); /* * Some boxes, such as HP nx7400, get terribly confused if mouse * is not fully enabled before suspending/shutting down. */ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE); if (parent) { if (parent->pt_deactivate) parent->pt_deactivate(parent); psmouse_activate(parent); } mutex_unlock(&psmouse_mutex); } /* * psmouse_disconnect() closes and frees. */ static void psmouse_disconnect(struct serio *serio) { struct psmouse *psmouse = psmouse_from_serio(serio); struct psmouse *parent = NULL; mutex_lock(&psmouse_mutex); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); /* make sure we don't have a resync in progress */ mutex_unlock(&psmouse_mutex); flush_workqueue(kpsmoused_wq); mutex_lock(&psmouse_mutex); if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = psmouse_from_serio(serio->parent); psmouse_deactivate(parent); } if (psmouse->disconnect) psmouse->disconnect(psmouse); if (parent && parent->pt_deactivate) parent->pt_deactivate(parent); psmouse_set_state(psmouse, PSMOUSE_IGNORE); serio_close(serio); serio_set_drvdata(serio, NULL); if (psmouse->dev) input_unregister_device(psmouse->dev); kfree(psmouse); if (parent) psmouse_activate(parent); mutex_unlock(&psmouse_mutex); } static int psmouse_switch_protocol(struct psmouse *psmouse, const struct psmouse_protocol *proto) { const struct psmouse_protocol *selected_proto; struct input_dev *input_dev = psmouse->dev; enum psmouse_type type; input_dev->dev.parent = &psmouse->ps2dev.serio->dev; if (proto && (proto->detect || proto->init)) { psmouse_apply_defaults(psmouse); if (proto->detect && proto->detect(psmouse, true) < 0) return -1; if (proto->init && proto->init(psmouse) < 0) return -1; selected_proto = proto; } else { type = psmouse_extensions(psmouse, psmouse_max_proto, true); selected_proto = psmouse_protocol_by_type(type); } psmouse->protocol = selected_proto; /* * If mouse's packet size is 3 there is no point in polling the * device in hopes to detect protocol reset - we won't get less * than 3 bytes response anyhow. 
*/ if (psmouse->pktsize == 3) psmouse->resync_time = 0; /* * Some smart KVMs fake response to POLL command returning just * 3 bytes and messing up our resync logic, so if initial poll * fails we won't try polling the device anymore. Hopefully * such KVM will maintain initially selected protocol. */ if (psmouse->resync_time && psmouse->poll(psmouse)) psmouse->resync_time = 0; snprintf(psmouse->devname, sizeof(psmouse->devname), "%s %s %s", selected_proto->name, psmouse->vendor, psmouse->name); input_dev->name = psmouse->devname; input_dev->phys = psmouse->phys; input_dev->id.bustype = BUS_I8042; input_dev->id.vendor = 0x0002; input_dev->id.product = psmouse->protocol->type; input_dev->id.version = psmouse->model; return 0; } /* * psmouse_connect() is a callback from the serio module when * an unhandled serio port is found. */ static int psmouse_connect(struct serio *serio, struct serio_driver *drv) { struct psmouse *psmouse, *parent = NULL; struct input_dev *input_dev; int retval = 0, error = -ENOMEM; mutex_lock(&psmouse_mutex); /* * If this is a pass-through port deactivate parent so the device * connected to this port can be successfully identified */ if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = psmouse_from_serio(serio->parent); psmouse_deactivate(parent); } psmouse = kzalloc(sizeof(*psmouse), GFP_KERNEL); input_dev = input_allocate_device(); if (!psmouse || !input_dev) goto err_free; ps2_init(&psmouse->ps2dev, serio, psmouse_pre_receive_byte, psmouse_receive_byte); INIT_DELAYED_WORK(&psmouse->resync_work, psmouse_resync); psmouse->dev = input_dev; snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); error = serio_open(serio, drv); if (error) goto err_clear_drvdata; /* give PT device some time to settle down before probing */ if (serio->id.type == SERIO_PS_PSTHRU) usleep_range(10000, 15000); if (psmouse_probe(psmouse) < 0) { error = -ENODEV; goto err_close_serio; } psmouse->rate = psmouse_rate; psmouse->resolution = psmouse_resolution; psmouse->resetafter = psmouse_resetafter; psmouse->resync_time = parent ? 0 : psmouse_resync_time; psmouse->smartscroll = psmouse_smartscroll; psmouse_switch_protocol(psmouse, NULL); if (!psmouse->protocol->smbus_companion) { psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); psmouse_initialize(psmouse); error = input_register_device(input_dev); if (error) goto err_protocol_disconnect; } else { /* Smbus companion will be reporting events, not us. */ input_free_device(input_dev); psmouse->dev = input_dev = NULL; } if (parent && parent->pt_activate) parent->pt_activate(parent); /* * PS/2 devices having SMBus companions should stay disabled * on PS/2 side, in order to have SMBus part operable. 
*/ if (!psmouse->protocol->smbus_companion) psmouse_activate(psmouse); out: /* If this is a pass-through port the parent needs to be re-activated */ if (parent) psmouse_activate(parent); mutex_unlock(&psmouse_mutex); return retval; err_protocol_disconnect: if (psmouse->disconnect) psmouse->disconnect(psmouse); psmouse_set_state(psmouse, PSMOUSE_IGNORE); err_close_serio: serio_close(serio); err_clear_drvdata: serio_set_drvdata(serio, NULL); err_free: input_free_device(input_dev); kfree(psmouse); retval = error; goto out; } static int __psmouse_reconnect(struct serio *serio, bool fast_reconnect) { struct psmouse *psmouse = psmouse_from_serio(serio); struct psmouse *parent = NULL; int (*reconnect_handler)(struct psmouse *); enum psmouse_type type; int rc = -1; mutex_lock(&psmouse_mutex); if (fast_reconnect) { reconnect_handler = psmouse->fast_reconnect; if (!reconnect_handler) { rc = -ENOENT; goto out_unlock; } } else { reconnect_handler = psmouse->reconnect; } if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = psmouse_from_serio(serio->parent); psmouse_deactivate(parent); } psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); if (reconnect_handler) { if (reconnect_handler(psmouse)) goto out; } else { psmouse_reset(psmouse); if (psmouse_probe(psmouse) < 0) goto out; type = psmouse_extensions(psmouse, psmouse_max_proto, false); if (psmouse->protocol->type != type) goto out; } /* * OK, the device type (and capabilities) match the old one, * we can continue using it, complete initialization */ if (!psmouse->protocol->smbus_companion) { psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); psmouse_initialize(psmouse); } if (parent && parent->pt_activate) parent->pt_activate(parent); /* * PS/2 devices having SMBus companions should stay disabled * on PS/2 side, in order to have SMBus part operable. 
*/ if (!psmouse->protocol->smbus_companion) psmouse_activate(psmouse); rc = 0; out: /* If this is a pass-through port the parent waits to be activated */ if (parent) psmouse_activate(parent); out_unlock: mutex_unlock(&psmouse_mutex); return rc; } static int psmouse_reconnect(struct serio *serio) { return __psmouse_reconnect(serio, false); } static int psmouse_fast_reconnect(struct serio *serio) { return __psmouse_reconnect(serio, true); } static struct serio_device_id psmouse_serio_ids[] = { { .type = SERIO_8042, .proto = SERIO_ANY, .id = SERIO_ANY, .extra = SERIO_ANY, }, { .type = SERIO_PS_PSTHRU, .proto = SERIO_ANY, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, psmouse_serio_ids); static struct serio_driver psmouse_drv = { .driver = { .name = "psmouse", .dev_groups = psmouse_dev_groups, }, .description = DRIVER_DESC, .id_table = psmouse_serio_ids, .interrupt = ps2_interrupt, .connect = psmouse_connect, .reconnect = psmouse_reconnect, .fast_reconnect = psmouse_fast_reconnect, .disconnect = psmouse_disconnect, .cleanup = psmouse_cleanup, }; ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *devattr, char *buf) { struct serio *serio = to_serio_port(dev); struct psmouse_attribute *attr = to_psmouse_attr(devattr); struct psmouse *psmouse = psmouse_from_serio(serio); if (psmouse->protocol->smbus_companion && devattr != &psmouse_attr_protocol.dattr) return -ENOENT; return attr->show(psmouse, attr->data, buf); } ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct serio *serio = to_serio_port(dev); struct psmouse_attribute *attr = to_psmouse_attr(devattr); struct psmouse *psmouse, *parent = NULL; int retval; retval = mutex_lock_interruptible(&psmouse_mutex); if (retval) goto out; psmouse = psmouse_from_serio(serio); if (psmouse->protocol->smbus_companion && devattr != &psmouse_attr_protocol.dattr) { retval = -ENOENT; goto out_unlock; } if (attr->protect) { if (psmouse->state == PSMOUSE_IGNORE) { retval = -ENODEV; goto out_unlock; } if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = psmouse_from_serio(serio->parent); psmouse_deactivate(parent); } if (!psmouse->protocol->smbus_companion) psmouse_deactivate(psmouse); } retval = attr->set(psmouse, attr->data, buf, count); if (attr->protect) { if (retval != -ENODEV && !psmouse->protocol->smbus_companion) psmouse_activate(psmouse); if (parent) psmouse_activate(parent); } out_unlock: mutex_unlock(&psmouse_mutex); out: return retval; } static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char *buf) { unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset); return sprintf(buf, "%u\n", *field); } static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count) { unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset); unsigned int value; int err; err = kstrtouint(buf, 10, &value); if (err) return err; *field = value; return count; } static ssize_t psmouse_attr_show_protocol(struct psmouse *psmouse, void *data, char *buf) { return sprintf(buf, "%s\n", psmouse->protocol->name); } static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, const char *buf, size_t count) { struct serio *serio = psmouse->ps2dev.serio; struct psmouse *parent = NULL; struct input_dev *old_dev, *new_dev; const struct psmouse_protocol *proto, *old_proto; int error; int retry = 0; proto = psmouse_protocol_by_name(buf, 
count); if (!proto) return -EINVAL; if (psmouse->protocol == proto) return count; new_dev = input_allocate_device(); if (!new_dev) return -ENOMEM; while (!list_empty(&serio->children)) { if (++retry > 3) { psmouse_warn(psmouse, "failed to destroy children ports, protocol change aborted.\n"); input_free_device(new_dev); return -EIO; } mutex_unlock(&psmouse_mutex); serio_unregister_child_port(serio); mutex_lock(&psmouse_mutex); if (serio->drv != &psmouse_drv) { input_free_device(new_dev); return -ENODEV; } if (psmouse->protocol == proto) { input_free_device(new_dev); return count; /* switched by other thread */ } } if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { parent = psmouse_from_serio(serio->parent); if (parent->pt_deactivate) parent->pt_deactivate(parent); } old_dev = psmouse->dev; old_proto = psmouse->protocol; if (psmouse->disconnect) psmouse->disconnect(psmouse); psmouse_set_state(psmouse, PSMOUSE_IGNORE); psmouse->dev = new_dev; psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); if (psmouse_switch_protocol(psmouse, proto) < 0) { psmouse_reset(psmouse); /* default to PSMOUSE_PS2 */ psmouse_switch_protocol(psmouse, &psmouse_protocols[0]); } psmouse_initialize(psmouse); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); if (psmouse->protocol->smbus_companion) { input_free_device(psmouse->dev); psmouse->dev = NULL; } else { error = input_register_device(psmouse->dev); if (error) { if (psmouse->disconnect) psmouse->disconnect(psmouse); psmouse_set_state(psmouse, PSMOUSE_IGNORE); input_free_device(new_dev); psmouse->dev = old_dev; psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); psmouse_switch_protocol(psmouse, old_proto); psmouse_initialize(psmouse); psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); return error; } } if (old_dev) input_unregister_device(old_dev); if (parent && parent->pt_activate) parent->pt_activate(parent); return count; } static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count) { unsigned int value; int err; err = kstrtouint(buf, 10, &value); if (err) return err; psmouse->set_rate(psmouse, value); return count; } static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count) { unsigned int value; int err; err = kstrtouint(buf, 10, &value); if (err) return err; psmouse->set_resolution(psmouse, value); return count; } static int psmouse_set_maxproto(const char *val, const struct kernel_param *kp) { const struct psmouse_protocol *proto; if (!val) return -EINVAL; proto = psmouse_protocol_by_name(val, strlen(val)); if (!proto || !proto->maxproto) return -EINVAL; *((unsigned int *)kp->arg) = proto->type; return 0; } static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) { int type = *((unsigned int *)kp->arg); return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); } static int __init psmouse_init(void) { int err; lifebook_module_init(); synaptics_module_init(); hgpk_module_init(); err = psmouse_smbus_module_init(); if (err) return err; kpsmoused_wq = alloc_ordered_workqueue("kpsmoused", 0); if (!kpsmoused_wq) { pr_err("failed to create kpsmoused workqueue\n"); err = -ENOMEM; goto err_smbus_exit; } err = serio_register_driver(&psmouse_drv); if (err) goto err_destroy_wq; return 0; err_destroy_wq: destroy_workqueue(kpsmoused_wq); err_smbus_exit: psmouse_smbus_module_exit(); return err; } static void __exit psmouse_exit(void) { serio_unregister_driver(&psmouse_drv); destroy_workqueue(kpsmoused_wq); psmouse_smbus_module_exit(); } 
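The generic attribute helpers shown above (psmouse_show_int_attr() and psmouse_set_int_attr()) avoid writing one show/store pair per field: the attribute's data pointer carries a structure offset rather than a real pointer, and the helper adds that offset to the psmouse pointer at run time. Below is a minimal standalone sketch of the same offsetof() pattern; the struct and helper names are invented for illustration and are not part of the driver.

/* Illustrative sketch only -- not part of psmouse-base.c. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct psmouse: a few unsigned int settings. */
struct fake_mouse {
	unsigned int rate;
	unsigned int resolution;
	unsigned int resetafter;
};

/* Generic "show": the void *offset smuggles a field offset, not a pointer. */
static int show_uint_attr(struct fake_mouse *m, void *offset, char *buf, size_t len)
{
	unsigned int *field = (unsigned int *)((char *)m + (size_t)offset);

	return snprintf(buf, len, "%u\n", *field);
}

/* Generic "set": same offset arithmetic, plus string parsing. */
static int set_uint_attr(struct fake_mouse *m, void *offset, const char *buf)
{
	unsigned int *field = (unsigned int *)((char *)m + (size_t)offset);
	char *end;
	unsigned long value = strtoul(buf, &end, 10);

	if (end == buf)
		return -1;	/* not a number */

	*field = (unsigned int)value;
	return 0;
}

int main(void)
{
	struct fake_mouse m = { .rate = 100, .resolution = 200, .resetafter = 5 };
	char buf[32];

	/* One helper pair serves every unsigned int field. */
	set_uint_attr(&m, (void *)offsetof(struct fake_mouse, rate), "80");
	show_uint_attr(&m, (void *)offsetof(struct fake_mouse, rate), buf, sizeof(buf));
	printf("rate: %s", buf);

	show_uint_attr(&m, (void *)offsetof(struct fake_mouse, resetafter), buf, sizeof(buf));
	printf("resetafter: %s", buf);

	return 0;
}

This is the same trick PSMOUSE_DEFINE_ATTR relies on when it passes (void *) offsetof(struct psmouse, rate) and friends as the attribute data for rate, resolution, resetafter and resync_time.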
module_init(psmouse_init);
module_exit(psmouse_exit);
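psmouse_report_standard_buttons() and psmouse_report_standard_motion() above decode the classic 3-byte PS/2 report: byte 0 carries the button bits plus the X and Y sign bits, and bytes 1 and 2 carry the movement magnitudes, which the sign bits extend to signed 9-bit deltas. A standalone sketch of the same arithmetic on a made-up sample packet:

/* Illustrative sketch only -- not part of psmouse-base.c. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Decode one standard 3-byte PS/2 mouse report, mirroring the driver math. */
static void decode_standard_packet(const uint8_t packet[3])
{
	/*
	 * Byte 0: bit 0 = left, bit 1 = right, bit 2 = middle,
	 * bit 4 = X sign, bit 5 = Y sign.
	 */
	int left   = !!(packet[0] & BIT(0));
	int right  = !!(packet[0] & BIT(1));
	int middle = !!(packet[0] & BIT(2));

	/*
	 * Shift the sign bit up to bit 8 and subtract, turning the 8-bit
	 * magnitude into a signed 9-bit delta, exactly as the driver does.
	 */
	int dx = packet[1] ? (int)packet[1] - (int)((packet[0] << 4) & 0x100) : 0;
	int dy = packet[2] ? (int)packet[2] - (int)((packet[0] << 3) & 0x100) : 0;

	/*
	 * The driver reports REL_Y as -y: PS/2 "up" is positive, while the
	 * input layer treats positive REL_Y as downward motion.
	 */
	printf("L=%d M=%d R=%d  dx=%d  dy(as reported)=%d\n",
	       left, middle, right, dx, -dy);
}

int main(void)
{
	/* Sample: left button down, X sign set, dx = -6, dy = +3 (PS/2 sense). */
	const uint8_t sample[3] = { 0x19, 0xFA, 0x03 };

	decode_standard_packet(sample);
	return 0;
}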
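The protocol attribute defined earlier (psmouse_attr_set_protocol(), with lookup by name or alias through psmouse_protocol_by_name()) allows switching the protocol at run time from user space. A small sketch of such a write; the serio device path is an example and the exact serioN number differs per machine:

/* Illustrative userspace sketch only -- not part of the driver. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Example path: psmouse attributes sit on the serio port device. */
	const char *attr = "/sys/bus/serio/devices/serio1/protocol";
	const char *proto = "exps";	/* alias for ImExPS/2 in the protocol table */
	int fd;
	ssize_t n;

	fd = open(attr, O_WRONLY);
	if (fd < 0) {
		perror("open protocol attribute");
		return 1;
	}

	/* A successful write ends up in psmouse_attr_set_protocol(). */
	n = write(fd, proto, strlen(proto));
	if (n < 0)
		perror("write protocol");

	close(fd);
	return n < 0 ? 1 : 0;
}

Writing a string that matches neither a name nor an alias in psmouse_protocols[] is rejected with -EINVAL, and writing the currently active protocol is a no-op.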
907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 // SPDX-License-Identifier: GPL-2.0 /* * Greybus connections * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. */ #include <linux/workqueue.h> #include <linux/greybus.h> #include "greybus_trace.h" #define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000 static void gb_connection_kref_release(struct kref *kref); static DEFINE_SPINLOCK(gb_connections_lock); static DEFINE_MUTEX(gb_connection_mutex); /* Caller holds gb_connection_mutex. */ static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id) { struct gb_host_device *hd = intf->hd; struct gb_connection *connection; list_for_each_entry(connection, &hd->connections, hd_links) { if (connection->intf == intf && connection->intf_cport_id == cport_id) return true; } return false; } static void gb_connection_get(struct gb_connection *connection) { kref_get(&connection->kref); trace_gb_connection_get(connection); } static void gb_connection_put(struct gb_connection *connection) { trace_gb_connection_put(connection); kref_put(&connection->kref, gb_connection_kref_release); } /* * Returns a reference-counted pointer to the connection if found. */ static struct gb_connection * gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id) { struct gb_connection *connection; unsigned long flags; spin_lock_irqsave(&gb_connections_lock, flags); list_for_each_entry(connection, &hd->connections, hd_links) if (connection->hd_cport_id == cport_id) { gb_connection_get(connection); goto found; } connection = NULL; found: spin_unlock_irqrestore(&gb_connections_lock, flags); return connection; } /* * Callback from the host driver to let us know that data has been * received on the bundle. */ void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, u8 *data, size_t length) { struct gb_connection *connection; trace_gb_hd_in(hd); connection = gb_connection_hd_find(hd, cport_id); if (!connection) { dev_err(&hd->dev, "nonexistent connection (%zu bytes dropped)\n", length); return; } gb_connection_recv(connection, data, length); gb_connection_put(connection); } EXPORT_SYMBOL_GPL(greybus_data_rcvd); static void gb_connection_kref_release(struct kref *kref) { struct gb_connection *connection; connection = container_of(kref, struct gb_connection, kref); trace_gb_connection_release(connection); kfree(connection); } static void gb_connection_init_name(struct gb_connection *connection) { u16 hd_cport_id = connection->hd_cport_id; u16 cport_id = 0; u8 intf_id = 0; if (connection->intf) { intf_id = connection->intf->interface_id; cport_id = connection->intf_cport_id; } snprintf(connection->name, sizeof(connection->name), "%u/%u:%u", hd_cport_id, intf_id, cport_id); } /* * _gb_connection_create() - create a Greybus connection * @hd: host device of the connection * @hd_cport_id: host-device cport id, or -1 for dynamic allocation * @intf: remote interface, or NULL for static connections * @bundle: remote-interface bundle (may be NULL) * @cport_id: remote-interface cport id, or 0 for static connections * @handler: request handler (may be NULL) * @flags: connection flags * * Create a Greybus connection, representing the bidirectional link * between a CPort on a (local) Greybus host device and a CPort on * another Greybus interface. * * A connection also maintains the state of operations sent over the * connection. * * Serialised against concurrent create and destroy using the * gb_connection_mutex. 
* * Return: A pointer to the new connection if successful, or an ERR_PTR * otherwise. */ static struct gb_connection * _gb_connection_create(struct gb_host_device *hd, int hd_cport_id, struct gb_interface *intf, struct gb_bundle *bundle, int cport_id, gb_request_handler_t handler, unsigned long flags) { struct gb_connection *connection; int ret; mutex_lock(&gb_connection_mutex); if (intf && gb_connection_cport_in_use(intf, cport_id)) { dev_err(&intf->dev, "cport %u already in use\n", cport_id); ret = -EBUSY; goto err_unlock; } ret = gb_hd_cport_allocate(hd, hd_cport_id, flags); if (ret < 0) { dev_err(&hd->dev, "failed to allocate cport: %d\n", ret); goto err_unlock; } hd_cport_id = ret; connection = kzalloc(sizeof(*connection), GFP_KERNEL); if (!connection) { ret = -ENOMEM; goto err_hd_cport_release; } connection->hd_cport_id = hd_cport_id; connection->intf_cport_id = cport_id; connection->hd = hd; connection->intf = intf; connection->bundle = bundle; connection->handler = handler; connection->flags = flags; if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES)) connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL; connection->state = GB_CONNECTION_STATE_DISABLED; atomic_set(&connection->op_cycle, 0); mutex_init(&connection->mutex); spin_lock_init(&connection->lock); INIT_LIST_HEAD(&connection->operations); connection->wq = alloc_ordered_workqueue("%s:%d", 0, dev_name(&hd->dev), hd_cport_id); if (!connection->wq) { ret = -ENOMEM; goto err_free_connection; } kref_init(&connection->kref); gb_connection_init_name(connection); spin_lock_irq(&gb_connections_lock); list_add(&connection->hd_links, &hd->connections); if (bundle) list_add(&connection->bundle_links, &bundle->connections); else INIT_LIST_HEAD(&connection->bundle_links); spin_unlock_irq(&gb_connections_lock); mutex_unlock(&gb_connection_mutex); trace_gb_connection_create(connection); return connection; err_free_connection: kfree(connection); err_hd_cport_release: gb_hd_cport_release(hd, hd_cport_id); err_unlock: mutex_unlock(&gb_connection_mutex); return ERR_PTR(ret); } struct gb_connection * gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id, gb_request_handler_t handler) { return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler, GB_CONNECTION_FLAG_HIGH_PRIO); } struct gb_connection * gb_connection_create_control(struct gb_interface *intf) { return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, GB_CONNECTION_FLAG_CONTROL | GB_CONNECTION_FLAG_HIGH_PRIO); } struct gb_connection * gb_connection_create(struct gb_bundle *bundle, u16 cport_id, gb_request_handler_t handler) { struct gb_interface *intf = bundle->intf; return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, handler, 0); } EXPORT_SYMBOL_GPL(gb_connection_create); struct gb_connection * gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id, gb_request_handler_t handler, unsigned long flags) { struct gb_interface *intf = bundle->intf; if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK)) flags &= ~GB_CONNECTION_FLAG_CORE_MASK; return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, handler, flags); } EXPORT_SYMBOL_GPL(gb_connection_create_flags); struct gb_connection * gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id, unsigned long flags) { flags |= GB_CONNECTION_FLAG_OFFLOADED; return gb_connection_create_flags(bundle, cport_id, NULL, flags); } EXPORT_SYMBOL_GPL(gb_connection_create_offloaded); static int gb_connection_hd_cport_enable(struct gb_connection *connection) 
{ struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_enable) return 0; ret = hd->driver->cport_enable(hd, connection->hd_cport_id, connection->flags); if (ret) { dev_err(&hd->dev, "%s: failed to enable host cport: %d\n", connection->name, ret); return ret; } return 0; } static void gb_connection_hd_cport_disable(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_disable) return; ret = hd->driver->cport_disable(hd, connection->hd_cport_id); if (ret) { dev_err(&hd->dev, "%s: failed to disable host cport: %d\n", connection->name, ret); } } static int gb_connection_hd_cport_connected(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_connected) return 0; ret = hd->driver->cport_connected(hd, connection->hd_cport_id); if (ret) { dev_err(&hd->dev, "%s: failed to set connected state: %d\n", connection->name, ret); return ret; } return 0; } static int gb_connection_hd_cport_flush(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_flush) return 0; ret = hd->driver->cport_flush(hd, connection->hd_cport_id); if (ret) { dev_err(&hd->dev, "%s: failed to flush host cport: %d\n", connection->name, ret); return ret; } return 0; } static int gb_connection_hd_cport_quiesce(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; size_t peer_space; int ret; if (!hd->driver->cport_quiesce) return 0; peer_space = sizeof(struct gb_operation_msg_hdr) + sizeof(struct gb_cport_shutdown_request); if (connection->mode_switch) peer_space += sizeof(struct gb_operation_msg_hdr); ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id, peer_space, GB_CONNECTION_CPORT_QUIESCE_TIMEOUT); if (ret) { dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n", connection->name, ret); return ret; } return 0; } static int gb_connection_hd_cport_clear(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_clear) return 0; ret = hd->driver->cport_clear(hd, connection->hd_cport_id); if (ret) { dev_err(&hd->dev, "%s: failed to clear host cport: %d\n", connection->name, ret); return ret; } return 0; } /* * Request the SVC to create a connection from AP's cport to interface's * cport. */ static int gb_connection_svc_connection_create(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; struct gb_interface *intf; u8 cport_flags; int ret; if (gb_connection_is_static(connection)) return 0; intf = connection->intf; /* * Enable either E2EFC or CSD, unless no flow control is requested. 
*/ cport_flags = GB_SVC_CPORT_FLAG_CSV_N; if (gb_connection_flow_control_disabled(connection)) { cport_flags |= GB_SVC_CPORT_FLAG_CSD_N; } else if (gb_connection_e2efc_enabled(connection)) { cport_flags |= GB_SVC_CPORT_FLAG_CSD_N | GB_SVC_CPORT_FLAG_E2EFC; } ret = gb_svc_connection_create(hd->svc, hd->svc->ap_intf_id, connection->hd_cport_id, intf->interface_id, connection->intf_cport_id, cport_flags); if (ret) { dev_err(&connection->hd->dev, "%s: failed to create svc connection: %d\n", connection->name, ret); return ret; } return 0; } static void gb_connection_svc_connection_destroy(struct gb_connection *connection) { if (gb_connection_is_static(connection)) return; gb_svc_connection_destroy(connection->hd->svc, connection->hd->svc->ap_intf_id, connection->hd_cport_id, connection->intf->interface_id, connection->intf_cport_id); } /* Inform Interface about active CPorts */ static int gb_connection_control_connected(struct gb_connection *connection) { struct gb_control *control; u16 cport_id = connection->intf_cport_id; int ret; if (gb_connection_is_static(connection)) return 0; if (gb_connection_is_control(connection)) return 0; control = connection->intf->control; ret = gb_control_connected_operation(control, cport_id); if (ret) { dev_err(&connection->bundle->dev, "failed to connect cport: %d\n", ret); return ret; } return 0; } static void gb_connection_control_disconnecting(struct gb_connection *connection) { struct gb_control *control; u16 cport_id = connection->intf_cport_id; int ret; if (gb_connection_is_static(connection)) return; control = connection->intf->control; ret = gb_control_disconnecting_operation(control, cport_id); if (ret) { dev_err(&connection->hd->dev, "%s: failed to send disconnecting: %d\n", connection->name, ret); } } static void gb_connection_control_disconnected(struct gb_connection *connection) { struct gb_control *control; u16 cport_id = connection->intf_cport_id; int ret; if (gb_connection_is_static(connection)) return; control = connection->intf->control; if (gb_connection_is_control(connection)) { if (connection->mode_switch) { ret = gb_control_mode_switch_operation(control); if (ret) { /* * Allow mode switch to time out waiting for * mailbox event. 
*/ return; } } return; } ret = gb_control_disconnected_operation(control, cport_id); if (ret) { dev_warn(&connection->bundle->dev, "failed to disconnect cport: %d\n", ret); } } static int gb_connection_shutdown_operation(struct gb_connection *connection, u8 phase) { struct gb_cport_shutdown_request *req; struct gb_operation *operation; int ret; operation = gb_operation_create_core(connection, GB_REQUEST_TYPE_CPORT_SHUTDOWN, sizeof(*req), 0, 0, GFP_KERNEL); if (!operation) return -ENOMEM; req = operation->request->payload; req->phase = phase; ret = gb_operation_request_send_sync(operation); gb_operation_put(operation); return ret; } static int gb_connection_cport_shutdown(struct gb_connection *connection, u8 phase) { struct gb_host_device *hd = connection->hd; const struct gb_hd_driver *drv = hd->driver; int ret; if (gb_connection_is_static(connection)) return 0; if (gb_connection_is_offloaded(connection)) { if (!drv->cport_shutdown) return 0; ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase, GB_OPERATION_TIMEOUT_DEFAULT); } else { ret = gb_connection_shutdown_operation(connection, phase); } if (ret) { dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n", connection->name, phase, ret); return ret; } return 0; } static int gb_connection_cport_shutdown_phase_1(struct gb_connection *connection) { return gb_connection_cport_shutdown(connection, 1); } static int gb_connection_cport_shutdown_phase_2(struct gb_connection *connection) { return gb_connection_cport_shutdown(connection, 2); } /* * Cancel all active operations on a connection. * * Locking: Called with connection lock held and state set to DISABLED or * DISCONNECTING. */ static void gb_connection_cancel_operations(struct gb_connection *connection, int errno) __must_hold(&connection->lock) { struct gb_operation *operation; while (!list_empty(&connection->operations)) { operation = list_last_entry(&connection->operations, struct gb_operation, links); gb_operation_get(operation); spin_unlock_irq(&connection->lock); if (gb_operation_is_incoming(operation)) gb_operation_cancel_incoming(operation, errno); else gb_operation_cancel(operation, errno); gb_operation_put(operation); spin_lock_irq(&connection->lock); } } /* * Cancel all active incoming operations on a connection. * * Locking: Called with connection lock held and state set to ENABLED_TX. */ static void gb_connection_flush_incoming_operations(struct gb_connection *connection, int errno) __must_hold(&connection->lock) { struct gb_operation *operation; bool incoming; while (!list_empty(&connection->operations)) { incoming = false; list_for_each_entry(operation, &connection->operations, links) { if (gb_operation_is_incoming(operation)) { gb_operation_get(operation); incoming = true; break; } } if (!incoming) break; spin_unlock_irq(&connection->lock); /* FIXME: flush, not cancel? */ gb_operation_cancel_incoming(operation, errno); gb_operation_put(operation); spin_lock_irq(&connection->lock); } } /* * _gb_connection_enable() - enable a connection * @connection: connection to enable * @rx: whether to enable incoming requests * * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and * ENABLED_TX->ENABLED state transitions. * * Locking: Caller holds connection->mutex. */ static int _gb_connection_enable(struct gb_connection *connection, bool rx) { int ret; /* Handle ENABLED_TX -> ENABLED transitions. 
*/ if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) { if (!(connection->handler && rx)) return 0; spin_lock_irq(&connection->lock); connection->state = GB_CONNECTION_STATE_ENABLED; spin_unlock_irq(&connection->lock); return 0; } ret = gb_connection_hd_cport_enable(connection); if (ret) return ret; ret = gb_connection_svc_connection_create(connection); if (ret) goto err_hd_cport_clear; ret = gb_connection_hd_cport_connected(connection); if (ret) goto err_svc_connection_destroy; spin_lock_irq(&connection->lock); if (connection->handler && rx) connection->state = GB_CONNECTION_STATE_ENABLED; else connection->state = GB_CONNECTION_STATE_ENABLED_TX; spin_unlock_irq(&connection->lock); ret = gb_connection_control_connected(connection); if (ret) goto err_control_disconnecting; return 0; err_control_disconnecting: spin_lock_irq(&connection->lock); connection->state = GB_CONNECTION_STATE_DISCONNECTING; gb_connection_cancel_operations(connection, -ESHUTDOWN); spin_unlock_irq(&connection->lock); /* Transmit queue should already be empty. */ gb_connection_hd_cport_flush(connection); gb_connection_control_disconnecting(connection); gb_connection_cport_shutdown_phase_1(connection); gb_connection_hd_cport_quiesce(connection); gb_connection_cport_shutdown_phase_2(connection); gb_connection_control_disconnected(connection); connection->state = GB_CONNECTION_STATE_DISABLED; err_svc_connection_destroy: gb_connection_svc_connection_destroy(connection); err_hd_cport_clear: gb_connection_hd_cport_clear(connection); gb_connection_hd_cport_disable(connection); return ret; } int gb_connection_enable(struct gb_connection *connection) { int ret = 0; mutex_lock(&connection->mutex); if (connection->state == GB_CONNECTION_STATE_ENABLED) goto out_unlock; ret = _gb_connection_enable(connection, true); if (!ret) trace_gb_connection_enable(connection); out_unlock: mutex_unlock(&connection->mutex); return ret; } EXPORT_SYMBOL_GPL(gb_connection_enable); int gb_connection_enable_tx(struct gb_connection *connection) { int ret = 0; mutex_lock(&connection->mutex); if (connection->state == GB_CONNECTION_STATE_ENABLED) { ret = -EINVAL; goto out_unlock; } if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) goto out_unlock; ret = _gb_connection_enable(connection, false); if (!ret) trace_gb_connection_enable(connection); out_unlock: mutex_unlock(&connection->mutex); return ret; } EXPORT_SYMBOL_GPL(gb_connection_enable_tx); void gb_connection_disable_rx(struct gb_connection *connection) { mutex_lock(&connection->mutex); spin_lock_irq(&connection->lock); if (connection->state != GB_CONNECTION_STATE_ENABLED) { spin_unlock_irq(&connection->lock); goto out_unlock; } connection->state = GB_CONNECTION_STATE_ENABLED_TX; gb_connection_flush_incoming_operations(connection, -ESHUTDOWN); spin_unlock_irq(&connection->lock); trace_gb_connection_disable(connection); out_unlock: mutex_unlock(&connection->mutex); } EXPORT_SYMBOL_GPL(gb_connection_disable_rx); void gb_connection_mode_switch_prepare(struct gb_connection *connection) { connection->mode_switch = true; } void gb_connection_mode_switch_complete(struct gb_connection *connection) { gb_connection_svc_connection_destroy(connection); gb_connection_hd_cport_clear(connection); gb_connection_hd_cport_disable(connection); connection->mode_switch = false; } void gb_connection_disable(struct gb_connection *connection) { mutex_lock(&connection->mutex); if (connection->state == GB_CONNECTION_STATE_DISABLED) goto out_unlock; trace_gb_connection_disable(connection); 
spin_lock_irq(&connection->lock); connection->state = GB_CONNECTION_STATE_DISCONNECTING; gb_connection_cancel_operations(connection, -ESHUTDOWN); spin_unlock_irq(&connection->lock); gb_connection_hd_cport_flush(connection); gb_connection_control_disconnecting(connection); gb_connection_cport_shutdown_phase_1(connection); gb_connection_hd_cport_quiesce(connection); gb_connection_cport_shutdown_phase_2(connection); gb_connection_control_disconnected(connection); connection->state = GB_CONNECTION_STATE_DISABLED; /* control-connection tear down is deferred when mode switching */ if (!connection->mode_switch) { gb_connection_svc_connection_destroy(connection); gb_connection_hd_cport_clear(connection); gb_connection_hd_cport_disable(connection); } out_unlock: mutex_unlock(&connection->mutex); } EXPORT_SYMBOL_GPL(gb_connection_disable); /* Disable a connection without communicating with the remote end. */ void gb_connection_disable_forced(struct gb_connection *connection) { mutex_lock(&connection->mutex); if (connection->state == GB_CONNECTION_STATE_DISABLED) goto out_unlock; trace_gb_connection_disable(connection); spin_lock_irq(&connection->lock); connection->state = GB_CONNECTION_STATE_DISABLED; gb_connection_cancel_operations(connection, -ESHUTDOWN); spin_unlock_irq(&connection->lock); gb_connection_hd_cport_flush(connection); gb_connection_svc_connection_destroy(connection); gb_connection_hd_cport_clear(connection); gb_connection_hd_cport_disable(connection); out_unlock: mutex_unlock(&connection->mutex); } EXPORT_SYMBOL_GPL(gb_connection_disable_forced); /* Caller must have disabled the connection before destroying it. */ void gb_connection_destroy(struct gb_connection *connection) { if (!connection) return; if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED)) gb_connection_disable(connection); mutex_lock(&gb_connection_mutex); spin_lock_irq(&gb_connections_lock); list_del(&connection->bundle_links); list_del(&connection->hd_links); spin_unlock_irq(&gb_connections_lock); destroy_workqueue(connection->wq); gb_hd_cport_release(connection->hd, connection->hd_cport_id); connection->hd_cport_id = CPORT_ID_BAD; mutex_unlock(&gb_connection_mutex); gb_connection_put(connection); } EXPORT_SYMBOL_GPL(gb_connection_destroy); void gb_connection_latency_tag_enable(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->latency_tag_enable) return; ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id); if (ret) { dev_err(&connection->hd->dev, "%s: failed to enable latency tag: %d\n", connection->name, ret); } } EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable); void gb_connection_latency_tag_disable(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->latency_tag_disable) return; ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id); if (ret) { dev_err(&connection->hd->dev, "%s: failed to disable latency tag: %d\n", connection->name, ret); } } EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
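Usage note (not part of connection.c): the exported enable/disable helpers above are meant to be paired by a bundle driver around the lifetime of a connection. Below is a minimal sketch under stated assumptions: example_handler() and example_lifecycle() are hypothetical names, the cport_id is assumed to come from the bundle's CPort descriptors, and only the gb_connection_* calls are the exported API shown above.

#include <linux/greybus.h>

/* Hypothetical handler for incoming requests on the connection's CPort. */
static int example_handler(struct gb_operation *op)
{
	return 0;
}

/* Hypothetical driver path pairing enable/disable around connection use. */
static int example_lifecycle(struct gb_bundle *bundle, u16 cport_id)
{
	struct gb_connection *connection;
	int ret;

	connection = gb_connection_create(bundle, cport_id, example_handler);
	if (IS_ERR(connection))
		return PTR_ERR(connection);

	/* DISABLED -> ENABLED: host CPort, SVC connection and control setup. */
	ret = gb_connection_enable(connection);
	if (ret)
		goto err_destroy;

	/* ... exchange operations over the connection ... */

	gb_connection_disable(connection);	/* back to DISABLED */
err_destroy:
	gb_connection_destroy(connection);	/* must already be disabled */
	return ret;
}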
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Nicira, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/in.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/in6.h> #include <linux/inetdevice.h> #include <linux/netfilter_ipv4.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/static_key.h> #include <net/ip.h> #include <net/icmp.h> #include <net/protocol.h> #include <net/ip_tunnels.h> #include <net/ip6_tunnel.h> #include <net/ip6_checksum.h> #include <net/arp.h> #include <net/checksum.h> #include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/xfrm.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/dst_metadata.h> #include <net/geneve.h> #include <net/vxlan.h> #include <net/erspan.h> const struct ip_tunnel_encap_ops __rcu * iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly; EXPORT_SYMBOL(iptun_encaps); const struct ip6_tnl_encap_ops __rcu * ip6tun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly; EXPORT_SYMBOL(ip6tun_encaps); void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl, __be16 df, bool xnet) { int pkt_len = skb->len - skb_inner_network_offset(skb); struct net *net = dev_net(rt->dst.dev); struct net_device *dev = skb->dev; struct iphdr *iph; int err; skb_scrub_packet(skb, xnet); skb_clear_hash_if_not_l4(skb); skb_dst_set(skb, &rt->dst); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); /* Push down and install the IP header. */ skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr) >> 2; iph->frag_off = ip_mtu_locked(&rt->dst) ?
0 : df; iph->protocol = proto; iph->tos = tos; iph->daddr = dst; iph->saddr = src; iph->ttl = ttl; __ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1); err = ip_local_out(net, sk, skb); if (dev) { if (unlikely(net_xmit_eval(err))) pkt_len = 0; iptunnel_xmit_stats(dev, pkt_len); } } EXPORT_SYMBOL_GPL(iptunnel_xmit); int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, bool raw_proto, bool xnet) { if (unlikely(!pskb_may_pull(skb, hdr_len))) return -ENOMEM; skb_pull_rcsum(skb, hdr_len); if (!raw_proto && inner_proto == htons(ETH_P_TEB)) { struct ethhdr *eh; if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) return -ENOMEM; eh = (struct ethhdr *)skb->data; if (likely(eth_proto_is_802_3(eh->h_proto))) skb->protocol = eh->h_proto; else skb->protocol = htons(ETH_P_802_2); } else { skb->protocol = inner_proto; } skb_clear_hash_if_not_l4(skb); __vlan_hwaccel_clear_tag(skb); skb_set_queue_mapping(skb, 0); skb_scrub_packet(skb, xnet); return iptunnel_pull_offloads(skb); } EXPORT_SYMBOL_GPL(__iptunnel_pull_header); struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags) { IP_TUNNEL_DECLARE_FLAGS(tun_flags) = { }; struct metadata_dst *res; struct ip_tunnel_info *dst, *src; if (!md || md->type != METADATA_IP_TUNNEL || md->u.tun_info.mode & IP_TUNNEL_INFO_TX) return NULL; src = &md->u.tun_info; res = metadata_dst_alloc(src->options_len, METADATA_IP_TUNNEL, flags); if (!res) return NULL; dst = &res->u.tun_info; dst->key.tun_id = src->key.tun_id; if (src->mode & IP_TUNNEL_INFO_IPV6) memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src, sizeof(struct in6_addr)); else dst->key.u.ipv4.dst = src->key.u.ipv4.src; ip_tunnel_flags_copy(dst->key.tun_flags, src->key.tun_flags); dst->mode = src->mode | IP_TUNNEL_INFO_TX; ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src), src->options_len, tun_flags); return res; } EXPORT_SYMBOL_GPL(iptunnel_metadata_reply); int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask) { int err; if (likely(!skb->encapsulation)) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } if (skb_is_gso(skb)) { err = skb_header_unclone(skb, GFP_ATOMIC); if (unlikely(err)) return err; skb_shinfo(skb)->gso_type |= gso_type_mask; return 0; } if (skb->ip_summed != CHECKSUM_PARTIAL) { skb->ip_summed = CHECKSUM_NONE; /* We clear encapsulation here to prevent badly-written * drivers potentially deciding to offload an inner checksum * if we set CHECKSUM_PARTIAL on the outer header. * This should go away when the drivers are all fixed. */ skb->encapsulation = 0; } return 0; } EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); /** * iptunnel_pmtud_build_icmp() - Build ICMP error message for PMTUD * @skb: Original packet with L2 header * @mtu: MTU value for ICMP error * * Return: length on success, negative error code if message couldn't be built. 
*/ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) { const struct iphdr *iph = ip_hdr(skb); struct icmphdr *icmph; struct iphdr *niph; struct ethhdr eh; int len, err; if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) return -EINVAL; skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); pskb_pull(skb, ETH_HLEN); skb_reset_network_header(skb); err = pskb_trim(skb, 576 - sizeof(*niph) - sizeof(*icmph)); if (err) return err; len = skb->len + sizeof(*icmph); err = skb_cow(skb, sizeof(*niph) + sizeof(*icmph) + ETH_HLEN); if (err) return err; icmph = skb_push(skb, sizeof(*icmph)); *icmph = (struct icmphdr) { .type = ICMP_DEST_UNREACH, .code = ICMP_FRAG_NEEDED, .checksum = 0, .un.frag.__unused = 0, .un.frag.mtu = htons(mtu), }; icmph->checksum = csum_fold(skb_checksum(skb, 0, len, 0)); skb_reset_transport_header(skb); niph = skb_push(skb, sizeof(*niph)); *niph = (struct iphdr) { .ihl = sizeof(*niph) / 4u, .version = 4, .tos = 0, .tot_len = htons(len + sizeof(*niph)), .id = 0, .frag_off = htons(IP_DF), .ttl = iph->ttl, .protocol = IPPROTO_ICMP, .saddr = iph->daddr, .daddr = iph->saddr, }; ip_send_check(niph); skb_reset_network_header(skb); skb->ip_summed = CHECKSUM_NONE; eth_header(skb, skb->dev, ntohs(eh.h_proto), eh.h_source, eh.h_dest, 0); skb_reset_mac_header(skb); return skb->len; } /** * iptunnel_pmtud_check_icmp() - Trigger ICMP reply if needed and allowed * @skb: Buffer being sent by encapsulation, L2 headers expected * @mtu: Network MTU for path * * Return: 0 for no ICMP reply, length if built, negative value on error. */ static int iptunnel_pmtud_check_icmp(struct sk_buff *skb, int mtu) { const struct icmphdr *icmph = icmp_hdr(skb); const struct iphdr *iph = ip_hdr(skb); if (mtu < 576 || iph->frag_off != htons(IP_DF)) return 0; if (ipv4_is_lbcast(iph->daddr) || ipv4_is_multicast(iph->daddr) || ipv4_is_zeronet(iph->saddr) || ipv4_is_loopback(iph->saddr) || ipv4_is_lbcast(iph->saddr) || ipv4_is_multicast(iph->saddr)) return 0; if (iph->protocol == IPPROTO_ICMP && icmp_is_err(icmph->type)) return 0; return iptunnel_pmtud_build_icmp(skb, mtu); } #if IS_ENABLED(CONFIG_IPV6) /** * iptunnel_pmtud_build_icmpv6() - Build ICMPv6 error message for PMTUD * @skb: Original packet with L2 header * @mtu: MTU value for ICMPv6 error * * Return: length on success, negative error code if message couldn't be built. 
*/ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct icmp6hdr *icmp6h; struct ipv6hdr *nip6h; struct ethhdr eh; int len, err; __wsum csum; if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) return -EINVAL; skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); pskb_pull(skb, ETH_HLEN); skb_reset_network_header(skb); err = pskb_trim(skb, IPV6_MIN_MTU - sizeof(*nip6h) - sizeof(*icmp6h)); if (err) return err; len = skb->len + sizeof(*icmp6h); err = skb_cow(skb, sizeof(*nip6h) + sizeof(*icmp6h) + ETH_HLEN); if (err) return err; icmp6h = skb_push(skb, sizeof(*icmp6h)); *icmp6h = (struct icmp6hdr) { .icmp6_type = ICMPV6_PKT_TOOBIG, .icmp6_code = 0, .icmp6_cksum = 0, .icmp6_mtu = htonl(mtu), }; skb_reset_transport_header(skb); nip6h = skb_push(skb, sizeof(*nip6h)); *nip6h = (struct ipv6hdr) { .priority = 0, .version = 6, .flow_lbl = { 0 }, .payload_len = htons(len), .nexthdr = IPPROTO_ICMPV6, .hop_limit = ip6h->hop_limit, .saddr = ip6h->daddr, .daddr = ip6h->saddr, }; skb_reset_network_header(skb); csum = skb_checksum(skb, skb_transport_offset(skb), len, 0); icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len, IPPROTO_ICMPV6, csum); skb->ip_summed = CHECKSUM_NONE; eth_header(skb, skb->dev, ntohs(eh.h_proto), eh.h_source, eh.h_dest, 0); skb_reset_mac_header(skb); return skb->len; } /** * iptunnel_pmtud_check_icmpv6() - Trigger ICMPv6 reply if needed and allowed * @skb: Buffer being sent by encapsulation, L2 headers expected * @mtu: Network MTU for path * * Return: 0 for no ICMPv6 reply, length if built, negative value on error. */ static int iptunnel_pmtud_check_icmpv6(struct sk_buff *skb, int mtu) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); int stype = ipv6_addr_type(&ip6h->saddr); u8 proto = ip6h->nexthdr; __be16 frag_off; int offset; if (mtu < IPV6_MIN_MTU) return 0; if (stype == IPV6_ADDR_ANY || stype == IPV6_ADDR_MULTICAST || stype == IPV6_ADDR_LOOPBACK) return 0; offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag_off); if (offset < 0 || (frag_off & htons(~0x7))) return 0; if (proto == IPPROTO_ICMPV6) { struct icmp6hdr *icmp6h; if (!pskb_may_pull(skb, skb_network_header(skb) + offset + 1 - skb->data)) return 0; icmp6h = (struct icmp6hdr *)(skb_network_header(skb) + offset); if (icmpv6_is_err(icmp6h->icmp6_type) || icmp6h->icmp6_type == NDISC_REDIRECT) return 0; } return iptunnel_pmtud_build_icmpv6(skb, mtu); } #endif /* IS_ENABLED(CONFIG_IPV6) */ /** * skb_tunnel_check_pmtu() - Check, update PMTU and trigger ICMP reply as needed * @skb: Buffer being sent by encapsulation, L2 headers expected * @encap_dst: Destination for tunnel encapsulation (outer IP) * @headroom: Encapsulation header size, bytes * @reply: Build matching ICMP or ICMPv6 message as a result * * L2 tunnel implementations that can carry IP and can be directly bridged * (currently UDP tunnels) can't always rely on IP forwarding paths to handle * PMTU discovery. In the bridged case, ICMP or ICMPv6 messages need to be built * based on payload and sent back by the encapsulation itself. * * For routable interfaces, we just need to update the PMTU for the destination. 
* * Return: 0 if ICMP error not needed, length if built, negative value on error */ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, int headroom, bool reply) { u32 mtu = dst_mtu(encap_dst) - headroom; if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) || (!skb_is_gso(skb) && (skb->len - skb_network_offset(skb)) <= mtu)) return 0; skb_dst_update_pmtu_no_confirm(skb, mtu); if (!reply || skb->pkt_type == PACKET_HOST) return 0; if (skb->protocol == htons(ETH_P_IP)) return iptunnel_pmtud_check_icmp(skb, mtu); #if IS_ENABLED(CONFIG_IPV6) if (skb->protocol == htons(ETH_P_IPV6)) return iptunnel_pmtud_check_icmpv6(skb, mtu); #endif return 0; } EXPORT_SYMBOL(skb_tunnel_check_pmtu); static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = { [LWTUNNEL_IP_UNSPEC] = { .strict_start_type = LWTUNNEL_IP_OPTS }, [LWTUNNEL_IP_ID] = { .type = NLA_U64 }, [LWTUNNEL_IP_DST] = { .type = NLA_U32 }, [LWTUNNEL_IP_SRC] = { .type = NLA_U32 }, [LWTUNNEL_IP_TTL] = { .type = NLA_U8 }, [LWTUNNEL_IP_TOS] = { .type = NLA_U8 }, [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 }, [LWTUNNEL_IP_OPTS] = { .type = NLA_NESTED }, }; static const struct nla_policy ip_opts_policy[LWTUNNEL_IP_OPTS_MAX + 1] = { [LWTUNNEL_IP_OPTS_GENEVE] = { .type = NLA_NESTED }, [LWTUNNEL_IP_OPTS_VXLAN] = { .type = NLA_NESTED }, [LWTUNNEL_IP_OPTS_ERSPAN] = { .type = NLA_NESTED }, }; static const struct nla_policy geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = { [LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, }; static const struct nla_policy vxlan_opt_policy[LWTUNNEL_IP_OPT_VXLAN_MAX + 1] = { [LWTUNNEL_IP_OPT_VXLAN_GBP] = { .type = NLA_U32 }, }; static const struct nla_policy erspan_opt_policy[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1] = { [LWTUNNEL_IP_OPT_ERSPAN_VER] = { .type = NLA_U8 }, [LWTUNNEL_IP_OPT_ERSPAN_INDEX] = { .type = NLA_U32 }, [LWTUNNEL_IP_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, [LWTUNNEL_IP_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, }; static int ip_tun_parse_opts_geneve(struct nlattr *attr, struct ip_tunnel_info *info, int opts_len, struct netlink_ext_ack *extack) { struct nlattr *tb[LWTUNNEL_IP_OPT_GENEVE_MAX + 1]; int data_len, err; err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_GENEVE_MAX, attr, geneve_opt_policy, extack); if (err) return err; if (!tb[LWTUNNEL_IP_OPT_GENEVE_CLASS] || !tb[LWTUNNEL_IP_OPT_GENEVE_TYPE] || !tb[LWTUNNEL_IP_OPT_GENEVE_DATA]) return -EINVAL; attr = tb[LWTUNNEL_IP_OPT_GENEVE_DATA]; data_len = nla_len(attr); if (data_len % 4) return -EINVAL; if (info) { struct geneve_opt *opt = ip_tunnel_info_opts(info) + opts_len; memcpy(opt->opt_data, nla_data(attr), data_len); opt->length = data_len / 4; attr = tb[LWTUNNEL_IP_OPT_GENEVE_CLASS]; opt->opt_class = nla_get_be16(attr); attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE]; opt->type = nla_get_u8(attr); __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags); } return sizeof(struct geneve_opt) + data_len; } static int ip_tun_parse_opts_vxlan(struct nlattr *attr, struct ip_tunnel_info *info, int opts_len, struct netlink_ext_ack *extack) { struct nlattr *tb[LWTUNNEL_IP_OPT_VXLAN_MAX + 1]; int err; err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_VXLAN_MAX, attr, vxlan_opt_policy, extack); if (err) return err; if (!tb[LWTUNNEL_IP_OPT_VXLAN_GBP]) return -EINVAL; if (info) { struct vxlan_metadata *md = ip_tunnel_info_opts(info) + opts_len; attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP]; md->gbp = nla_get_u32(attr); md->gbp &= VXLAN_GBP_MASK; 
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags); } return sizeof(struct vxlan_metadata); } static int ip_tun_parse_opts_erspan(struct nlattr *attr, struct ip_tunnel_info *info, int opts_len, struct netlink_ext_ack *extack) { struct nlattr *tb[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1]; int err; u8 ver; err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_ERSPAN_MAX, attr, erspan_opt_policy, extack); if (err) return err; if (!tb[LWTUNNEL_IP_OPT_ERSPAN_VER]) return -EINVAL; ver = nla_get_u8(tb[LWTUNNEL_IP_OPT_ERSPAN_VER]); if (ver == 1) { if (!tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX]) return -EINVAL; } else if (ver == 2) { if (!tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] || !tb[LWTUNNEL_IP_OPT_ERSPAN_HWID]) return -EINVAL; } else { return -EINVAL; } if (info) { struct erspan_metadata *md = ip_tunnel_info_opts(info) + opts_len; md->version = ver; if (ver == 1) { attr = tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX]; md->u.index = nla_get_be32(attr); } else { attr = tb[LWTUNNEL_IP_OPT_ERSPAN_DIR]; md->u.md2.dir = nla_get_u8(attr); attr = tb[LWTUNNEL_IP_OPT_ERSPAN_HWID]; set_hwid(&md->u.md2, nla_get_u8(attr)); } __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags); } return sizeof(struct erspan_metadata); } static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info, struct netlink_ext_ack *extack) { int err, rem, opt_len, opts_len = 0; struct nlattr *nla; u32 type = 0; if (!attr) return 0; err = nla_validate(nla_data(attr), nla_len(attr), LWTUNNEL_IP_OPTS_MAX, ip_opts_policy, extack); if (err) return err; nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) { switch (nla_type(nla)) { case LWTUNNEL_IP_OPTS_GENEVE: if (type && type != IP_TUNNEL_GENEVE_OPT_BIT) return -EINVAL; opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len, extack); if (opt_len < 0) return opt_len; opts_len += opt_len; if (opts_len > IP_TUNNEL_OPTS_MAX) return -EINVAL; type = IP_TUNNEL_GENEVE_OPT_BIT; break; case LWTUNNEL_IP_OPTS_VXLAN: if (type) return -EINVAL; opt_len = ip_tun_parse_opts_vxlan(nla, info, opts_len, extack); if (opt_len < 0) return opt_len; opts_len += opt_len; type = IP_TUNNEL_VXLAN_OPT_BIT; break; case LWTUNNEL_IP_OPTS_ERSPAN: if (type) return -EINVAL; opt_len = ip_tun_parse_opts_erspan(nla, info, opts_len, extack); if (opt_len < 0) return opt_len; opts_len += opt_len; type = IP_TUNNEL_ERSPAN_OPT_BIT; break; default: return -EINVAL; } } return opts_len; } static int ip_tun_get_optlen(struct nlattr *attr, struct netlink_ext_ack *extack) { return ip_tun_parse_opts(attr, NULL, extack); } static int ip_tun_set_opts(struct nlattr *attr, struct ip_tunnel_info *info, struct netlink_ext_ack *extack) { return ip_tun_parse_opts(attr, info, extack); } static int ip_tun_build_state(struct net *net, struct nlattr *attr, unsigned int family, const void *cfg, struct lwtunnel_state **ts, struct netlink_ext_ack *extack) { struct nlattr *tb[LWTUNNEL_IP_MAX + 1]; struct lwtunnel_state *new_state; struct ip_tunnel_info *tun_info; int err, opt_len; err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy, extack); if (err < 0) return err; opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP_OPTS], extack); if (opt_len < 0) return opt_len; new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len); if (!new_state) return -ENOMEM; new_state->type = LWTUNNEL_ENCAP_IP; tun_info = lwt_tun_info(new_state); err = ip_tun_set_opts(tb[LWTUNNEL_IP_OPTS], tun_info, extack); if (err < 0) { lwtstate_free(new_state); return err; } #ifdef CONFIG_DST_CACHE err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL); if (err) { lwtstate_free(new_state); 
return err; } #endif if (tb[LWTUNNEL_IP_ID]) tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]); if (tb[LWTUNNEL_IP_DST]) tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]); if (tb[LWTUNNEL_IP_SRC]) tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]); if (tb[LWTUNNEL_IP_TTL]) tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]); if (tb[LWTUNNEL_IP_TOS]) tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]); if (tb[LWTUNNEL_IP_FLAGS]) { IP_TUNNEL_DECLARE_FLAGS(flags); ip_tunnel_flags_from_be16(flags, nla_get_be16(tb[LWTUNNEL_IP_FLAGS])); ip_tunnel_clear_options_present(flags); ip_tunnel_flags_or(tun_info->key.tun_flags, tun_info->key.tun_flags, flags); } tun_info->mode = IP_TUNNEL_INFO_TX; tun_info->options_len = opt_len; *ts = new_state; return 0; } static void ip_tun_destroy_state(struct lwtunnel_state *lwtstate) { #ifdef CONFIG_DST_CACHE struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate); dst_cache_destroy(&tun_info->dst_cache); #endif } static int ip_tun_fill_encap_opts_geneve(struct sk_buff *skb, struct ip_tunnel_info *tun_info) { struct geneve_opt *opt; struct nlattr *nest; int offset = 0; nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE); if (!nest) return -ENOMEM; while (tun_info->options_len > offset) { opt = ip_tunnel_info_opts(tun_info) + offset; if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS, opt->opt_class) || nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) || nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4, opt->opt_data)) { nla_nest_cancel(skb, nest); return -ENOMEM; } offset += sizeof(*opt) + opt->length * 4; } nla_nest_end(skb, nest); return 0; } static int ip_tun_fill_encap_opts_vxlan(struct sk_buff *skb, struct ip_tunnel_info *tun_info) { struct vxlan_metadata *md; struct nlattr *nest; nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_VXLAN); if (!nest) return -ENOMEM; md = ip_tunnel_info_opts(tun_info); if (nla_put_u32(skb, LWTUNNEL_IP_OPT_VXLAN_GBP, md->gbp)) { nla_nest_cancel(skb, nest); return -ENOMEM; } nla_nest_end(skb, nest); return 0; } static int ip_tun_fill_encap_opts_erspan(struct sk_buff *skb, struct ip_tunnel_info *tun_info) { struct erspan_metadata *md; struct nlattr *nest; nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_ERSPAN); if (!nest) return -ENOMEM; md = ip_tunnel_info_opts(tun_info); if (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version)) goto err; if (md->version == 1 && nla_put_be32(skb, LWTUNNEL_IP_OPT_ERSPAN_INDEX, md->u.index)) goto err; if (md->version == 2 && (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_DIR, md->u.md2.dir) || nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_HWID, get_hwid(&md->u.md2)))) goto err; nla_nest_end(skb, nest); return 0; err: nla_nest_cancel(skb, nest); return -ENOMEM; } static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type, struct ip_tunnel_info *tun_info) { struct nlattr *nest; int err = 0; if (!ip_tunnel_is_options_present(tun_info->key.tun_flags)) return 0; nest = nla_nest_start_noflag(skb, type); if (!nest) return -ENOMEM; if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags)) err = ip_tun_fill_encap_opts_geneve(skb, tun_info); else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_info->key.tun_flags)) err = ip_tun_fill_encap_opts_vxlan(skb, tun_info); else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags)) err = ip_tun_fill_encap_opts_erspan(skb, tun_info); if (err) { nla_nest_cancel(skb, nest); return err; } nla_nest_end(skb, nest); return 0; } static int ip_tun_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state 
*lwtstate) { struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate); if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id, LWTUNNEL_IP_PAD) || nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) || nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) || nla_put_be16(skb, LWTUNNEL_IP_FLAGS, ip_tunnel_flags_to_be16(tun_info->key.tun_flags)) || ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info)) return -ENOMEM; return 0; } static int ip_tun_opts_nlsize(struct ip_tunnel_info *info) { int opt_len; if (!ip_tunnel_is_options_present(info->key.tun_flags)) return 0; opt_len = nla_total_size(0); /* LWTUNNEL_IP_OPTS */ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) { struct geneve_opt *opt; int offset = 0; opt_len += nla_total_size(0); /* LWTUNNEL_IP_OPTS_GENEVE */ while (info->options_len > offset) { opt = ip_tunnel_info_opts(info) + offset; opt_len += nla_total_size(2) /* OPT_GENEVE_CLASS */ + nla_total_size(1) /* OPT_GENEVE_TYPE */ + nla_total_size(opt->length * 4); /* OPT_GENEVE_DATA */ offset += sizeof(*opt) + opt->length * 4; } } else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) { opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_VXLAN */ + nla_total_size(4); /* OPT_VXLAN_GBP */ } else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags)) { struct erspan_metadata *md = ip_tunnel_info_opts(info); opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_ERSPAN */ + nla_total_size(1) /* OPT_ERSPAN_VER */ + (md->version == 1 ? nla_total_size(4) /* OPT_ERSPAN_INDEX (v1) */ : nla_total_size(1) + nla_total_size(1)); /* OPT_ERSPAN_DIR + HWID (v2) */ } return opt_len; } static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate) { return nla_total_size_64bit(8) /* LWTUNNEL_IP_ID */ + nla_total_size(4) /* LWTUNNEL_IP_DST */ + nla_total_size(4) /* LWTUNNEL_IP_SRC */ + nla_total_size(1) /* LWTUNNEL_IP_TOS */ + nla_total_size(1) /* LWTUNNEL_IP_TTL */ + nla_total_size(2) /* LWTUNNEL_IP_FLAGS */ + ip_tun_opts_nlsize(lwt_tun_info(lwtstate)); /* LWTUNNEL_IP_OPTS */ } static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b) { struct ip_tunnel_info *info_a = lwt_tun_info(a); struct ip_tunnel_info *info_b = lwt_tun_info(b); return memcmp(info_a, info_b, sizeof(info_a->key)) || info_a->mode != info_b->mode || info_a->options_len != info_b->options_len || memcmp(ip_tunnel_info_opts(info_a), ip_tunnel_info_opts(info_b), info_a->options_len); } static const struct lwtunnel_encap_ops ip_tun_lwt_ops = { .build_state = ip_tun_build_state, .destroy_state = ip_tun_destroy_state, .fill_encap = ip_tun_fill_encap_info, .get_encap_size = ip_tun_encap_nlsize, .cmp_encap = ip_tun_cmp_encap, .owner = THIS_MODULE, }; static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { [LWTUNNEL_IP6_UNSPEC] = { .strict_start_type = LWTUNNEL_IP6_OPTS }, [LWTUNNEL_IP6_ID] = { .type = NLA_U64 }, [LWTUNNEL_IP6_DST] = { .len = sizeof(struct in6_addr) }, [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) }, [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 }, [LWTUNNEL_IP6_TC] = { .type = NLA_U8 }, [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 }, [LWTUNNEL_IP6_OPTS] = { .type = NLA_NESTED }, }; static int ip6_tun_build_state(struct net *net, struct nlattr *attr, unsigned int family, const void *cfg, struct lwtunnel_state **ts, struct netlink_ext_ack *extack) { struct nlattr *tb[LWTUNNEL_IP6_MAX + 1]; struct lwtunnel_state *new_state; struct 
ip_tunnel_info *tun_info; int err, opt_len; err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy, extack); if (err < 0) return err; opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP6_OPTS], extack); if (opt_len < 0) return opt_len; new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len); if (!new_state) return -ENOMEM; new_state->type = LWTUNNEL_ENCAP_IP6; tun_info = lwt_tun_info(new_state); err = ip_tun_set_opts(tb[LWTUNNEL_IP6_OPTS], tun_info, extack); if (err < 0) { lwtstate_free(new_state); return err; } if (tb[LWTUNNEL_IP6_ID]) tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]); if (tb[LWTUNNEL_IP6_DST]) tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]); if (tb[LWTUNNEL_IP6_SRC]) tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]); if (tb[LWTUNNEL_IP6_HOPLIMIT]) tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]); if (tb[LWTUNNEL_IP6_TC]) tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]); if (tb[LWTUNNEL_IP6_FLAGS]) { IP_TUNNEL_DECLARE_FLAGS(flags); __be16 data; data = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]); ip_tunnel_flags_from_be16(flags, data); ip_tunnel_clear_options_present(flags); ip_tunnel_flags_or(tun_info->key.tun_flags, tun_info->key.tun_flags, flags); } tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6; tun_info->options_len = opt_len; *ts = new_state; return 0; } static int ip6_tun_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwtstate) { struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate); if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id, LWTUNNEL_IP6_PAD) || nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) || nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) || nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) || nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, ip_tunnel_flags_to_be16(tun_info->key.tun_flags)) || ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info)) return -ENOMEM; return 0; } static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate) { return nla_total_size_64bit(8) /* LWTUNNEL_IP6_ID */ + nla_total_size(16) /* LWTUNNEL_IP6_DST */ + nla_total_size(16) /* LWTUNNEL_IP6_SRC */ + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */ + nla_total_size(1) /* LWTUNNEL_IP6_TC */ + nla_total_size(2) /* LWTUNNEL_IP6_FLAGS */ + ip_tun_opts_nlsize(lwt_tun_info(lwtstate)); /* LWTUNNEL_IP6_OPTS */ } static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = { .build_state = ip6_tun_build_state, .fill_encap = ip6_tun_fill_encap_info, .get_encap_size = ip6_tun_encap_nlsize, .cmp_encap = ip_tun_cmp_encap, .owner = THIS_MODULE, }; void __init ip_tunnel_core_init(void) { /* If you land here, make sure whether increasing ip_tunnel_info's * options_len is a reasonable choice with its usage in front ends * (f.e., it's part of flow keys, etc). */ BUILD_BUG_ON(IP_TUNNEL_OPTS_MAX != 255); lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP); lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6); } DEFINE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt); EXPORT_SYMBOL(ip_tunnel_metadata_cnt); void ip_tunnel_need_metadata(void) { static_branch_inc(&ip_tunnel_metadata_cnt); } EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata); void ip_tunnel_unneed_metadata(void) { static_branch_dec(&ip_tunnel_metadata_cnt); } EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata); /* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) { if (skb_network_header(skb) >= skb->head && (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && ip_hdr(skb)->version == 4) return htons(ETH_P_IP); if (skb_network_header(skb) >= skb->head && (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && ipv6_hdr(skb)->version == 6) return htons(ETH_P_IPV6); return 0; } EXPORT_SYMBOL(ip_tunnel_parse_protocol); const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; EXPORT_SYMBOL(ip_tunnel_header_ops); /* This function returns true when ENCAP attributes are present in the nl msg */ bool ip_tunnel_netlink_encap_parms(struct nlattr *data[], struct ip_tunnel_encap *encap) { bool ret = false; memset(encap, 0, sizeof(*encap)); if (!data) return ret; if (data[IFLA_IPTUN_ENCAP_TYPE]) { ret = true; encap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]); } if (data[IFLA_IPTUN_ENCAP_FLAGS]) { ret = true; encap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]); } if (data[IFLA_IPTUN_ENCAP_SPORT]) { ret = true; encap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]); } if (data[IFLA_IPTUN_ENCAP_DPORT]) { ret = true; encap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]); } return ret; } EXPORT_SYMBOL_GPL(ip_tunnel_netlink_encap_parms); void ip_tunnel_netlink_parms(struct nlattr *data[], struct ip_tunnel_parm_kern *parms) { if (data[IFLA_IPTUN_LINK]) parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); if (data[IFLA_IPTUN_LOCAL]) parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]); if (data[IFLA_IPTUN_REMOTE]) parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]); if (data[IFLA_IPTUN_TTL]) { parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]); if (parms->iph.ttl) parms->iph.frag_off = htons(IP_DF); } if (data[IFLA_IPTUN_TOS]) parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]); if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC])) parms->iph.frag_off = htons(IP_DF); if (data[IFLA_IPTUN_FLAGS]) { __be16 flags; flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]); ip_tunnel_flags_from_be16(parms->i_flags, flags); } if (data[IFLA_IPTUN_PROTO]) parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]); } EXPORT_SYMBOL_GPL(ip_tunnel_netlink_parms);
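Usage note (not part of ip_tunnel_core.c): the two helpers exported near the top of this file, iptunnel_handle_offloads() and iptunnel_xmit(), are typically called back to back on a tunnel's transmit path. A minimal sketch follows; example_ipip_xmit() is hypothetical, and it assumes the caller already holds a valid routed dst in rt, has set skb->dev, and has chosen the outer addresses, tos and ttl.

#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
#include <net/route.h>

/* Hypothetical IP-in-IP transmit step built on the exported helpers above. */
static void example_ipip_xmit(struct sk_buff *skb, struct rtable *rt,
			      __be32 saddr, __be32 daddr, u8 tos, u8 ttl)
{
	/* Record the inner headers and propagate the GSO type for the encap. */
	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4)) {
		kfree_skb(skb);
		return;
	}

	/* Push the outer IPv4 header and transmit; tx stats are updated. */
	iptunnel_xmit(NULL, rt, skb, saddr, daddr, IPPROTO_IPIP,
		      tos, ttl, htons(IP_DF), false);
}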
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM block #if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_BLOCK_H #include <linux/blktrace_api.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/tracepoint.h> #include <uapi/linux/ioprio.h> #define RWBS_LEN 8 #define IOPRIO_CLASS_STRINGS \ { IOPRIO_CLASS_NONE, "none" }, \ { IOPRIO_CLASS_RT, "rt" }, \ { IOPRIO_CLASS_BE, "be" }, \ { IOPRIO_CLASS_IDLE, "idle" }, \ { IOPRIO_CLASS_INVALID, "invalid"} #ifdef CONFIG_BUFFER_HEAD DECLARE_EVENT_CLASS(block_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh), TP_STRUCT__entry ( __field( dev_t, dev ) __field( sector_t, sector ) __field( size_t, size ) ), TP_fast_assign( __entry->dev = bh->b_bdev->bd_dev; __entry->sector = bh->b_blocknr; __entry->size = bh->b_size; ), TP_printk("%d,%d sector=%llu size=%zu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->sector, __entry->size ) ); /** * block_touch_buffer - mark a buffer accessed * @bh: buffer_head being touched * * Called from touch_buffer().
*/ DEFINE_EVENT(block_buffer, block_touch_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh) ); /** * block_dirty_buffer - mark a buffer dirty * @bh: buffer_head being dirtied * * Called from mark_buffer_dirty(). */ DEFINE_EVENT(block_buffer, block_dirty_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh) ); #endif /* CONFIG_BUFFER_HEAD */ /** * block_rq_requeue - place block IO request back on a queue * @rq: block IO operation request * * The block operation request @rq is being placed back into queue * @q. For some reason the request was not completed and needs to be * put back in the queue. */ TRACE_EVENT(block_rq_requeue, TP_PROTO(struct request *rq), TP_ARGS(rq), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->ioprio = rq->ioprio; blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), 0) ); DECLARE_EVENT_CLASS(block_rq_completion, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( int , error ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_pos(rq); __entry->nr_sector = nr_bytes >> 9; __entry->error = blk_status_to_errno(error); __entry->ioprio = rq->ioprio; blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error) ); /** * block_rq_complete - block IO operation completed by device driver * @rq: block operations request * @error: status code * @nr_bytes: number of completed bytes * * The block_rq_complete tracepoint event indicates that some portion * of operation request has been completed by the device driver. If * the @rq->bio is %NULL, then there is absolutely no additional work to * do for the request. If @rq->bio is non-NULL then there is * additional work required to complete the request. */ DEFINE_EVENT(block_rq_completion, block_rq_complete, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes) ); /** * block_rq_error - block IO operation error reported by device driver * @rq: block operations request * @error: status code * @nr_bytes: number of completed bytes * * The block_rq_error tracepoint event indicates that some portion * of operation request has failed as reported by the device driver. 
*/ DEFINE_EVENT(block_rq_completion, block_rq_error, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes) ); DECLARE_EVENT_CLASS(block_rq, TP_PROTO(struct request *rq), TP_ARGS(rq), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( unsigned int, bytes ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->bytes = blk_rq_bytes(rq); __entry->ioprio = rq->ioprio; blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __entry->bytes, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm) ); /** * block_rq_insert - insert block operation request into queue * @rq: block IO operation request * * Called immediately before block operation request @rq is inserted * into queue @q. The fields in the operation request @rq struct can * be examined to determine which device and sectors the pending * operation would access. */ DEFINE_EVENT(block_rq, block_rq_insert, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_rq_issue - issue pending block IO request operation to device driver * @rq: block IO operation request * * Called when block operation request @rq from queue @q is sent to a * device driver for processing. */ DEFINE_EVENT(block_rq, block_rq_issue, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_rq_merge - merge request with another one in the elevator * @rq: block IO operation request * * Called when block operation request @rq from queue @q is merged to another * request queued in the elevator. */ DEFINE_EVENT(block_rq, block_rq_merge, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_io_start - insert a request for execution * @rq: block IO operation request * * Called when block operation request @rq is queued for execution */ DEFINE_EVENT(block_rq, block_io_start, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_io_done - block IO operation request completed * @rq: block IO operation request * * Called when block operation request @rq is completed */ DEFINE_EVENT(block_rq, block_io_done, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_bio_complete - completed all work on the block operation * @q: queue holding the block operation * @bio: block operation completed * * This tracepoint indicates there is no further work to do on this * block IO operation @bio. 
*/ TRACE_EVENT(block_bio_complete, TP_PROTO(struct request_queue *q, struct bio *bio), TP_ARGS(q, bio), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned, nr_sector ) __field( int, error ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->error = blk_status_to_errno(bio->bi_status); blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, __entry->error) ); DECLARE_EVENT_CLASS(block_bio, TP_PROTO(struct bio *bio), TP_ARGS(bio), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); blk_fill_rwbs(__entry->rwbs, bio->bi_opf); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %llu + %u [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, __entry->comm) ); /** * block_bio_bounce - used bounce buffer when processing block operation * @bio: block operation * * A bounce buffer was used to handle the block operation @bio in @q. * This occurs when hardware limitations prevent a direct transfer of * data between the @bio data memory area and the IO device. Use of a * bounce buffer requires extra copying of data and decreases * performance. */ DEFINE_EVENT(block_bio, block_bio_bounce, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_backmerge - merging block operation to the end of an existing operation * @bio: new block operation to merge * * Merging block request @bio to the end of an existing block request. */ DEFINE_EVENT(block_bio, block_bio_backmerge, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_frontmerge - merging block operation to the beginning of an existing operation * @bio: new block operation to merge * * Merging block IO operation @bio to the beginning of an existing block request. */ DEFINE_EVENT(block_bio, block_bio_frontmerge, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_queue - putting new block IO operation in queue * @bio: new block operation * * About to place the block IO operation @bio into queue @q. */ DEFINE_EVENT(block_bio, block_bio_queue, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_getrq - get a free request entry in queue for block IO operations * @bio: pending block IO operation (can be %NULL) * * A request struct has been allocated to handle the block IO operation @bio. */ DEFINE_EVENT(block_bio, block_getrq, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_plug - keep operations requests in request queue * @q: request queue to plug * * Plug the request queue @q. Do not allow block operation requests * to be sent to the device driver. Instead, accumulate requests in * the queue to improve throughput performance of the block device. 
*/ TRACE_EVENT(block_plug, TP_PROTO(struct request_queue *q), TP_ARGS(q), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("[%s]", __entry->comm) ); DECLARE_EVENT_CLASS(block_unplug, TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), TP_ARGS(q, depth, explicit), TP_STRUCT__entry( __field( int, nr_rq ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->nr_rq = depth; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) ); /** * block_unplug - release of operations requests in request queue * @q: request queue to unplug * @depth: number of requests just added to the queue * @explicit: whether this was an explicit unplug, or one from schedule() * * Unplug request queue @q because device driver is scheduled to work * on elements in the request queue. */ DEFINE_EVENT(block_unplug, block_unplug, TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), TP_ARGS(q, depth, explicit) ); /** * block_split - split a single bio struct into two bio structs * @bio: block operation being split * @new_sector: The starting sector for the new bio * * The bio request @bio needs to be split into two bio requests. The newly * created @bio request starts at @new_sector. This split may be required due to * hardware limitations such as operation crossing device boundaries in a RAID * system. */ TRACE_EVENT(block_split, TP_PROTO(struct bio *bio, unsigned int new_sector), TP_ARGS(bio, new_sector), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( sector_t, new_sector ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->new_sector = new_sector; blk_fill_rwbs(__entry->rwbs, bio->bi_opf); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %llu / %llu [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, (unsigned long long)__entry->new_sector, __entry->comm) ); /** * block_bio_remap - map request for a logical device to the raw device * @bio: revised operation * @dev: original device for the operation * @from: original sector for the operation * * An operation for a logical device has been mapped to the * raw block device. */ TRACE_EVENT(block_bio_remap, TP_PROTO(struct bio *bio, dev_t dev, sector_t from), TP_ARGS(bio, dev, from), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( dev_t, old_dev ) __field( sector_t, old_sector ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->old_dev = dev; __entry->old_sector = from; blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, MAJOR(__entry->old_dev), MINOR(__entry->old_dev), (unsigned long long)__entry->old_sector) ); /** * block_rq_remap - map request for a block operation request * @rq: block IO operation request * @dev: device for the operation * @from: original sector for the operation * * The block operation request @rq in @q has been remapped. 
The block * operation request @rq holds the current information and @from holds * the original sector. */ TRACE_EVENT(block_rq_remap, TP_PROTO(struct request *rq, dev_t dev, sector_t from), TP_ARGS(rq, dev, from), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( dev_t, old_dev ) __field( sector_t, old_sector ) __field( unsigned int, nr_bios ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = disk_devt(rq->q->disk); __entry->sector = blk_rq_pos(rq); __entry->nr_sector = blk_rq_sectors(rq); __entry->old_dev = dev; __entry->old_sector = from; __entry->nr_bios = blk_rq_count_bios(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, MAJOR(__entry->old_dev), MINOR(__entry->old_dev), (unsigned long long)__entry->old_sector, __entry->nr_bios) ); #endif /* _TRACE_BLOCK_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
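/*
 * Illustrative sketch (not part of the header above): a minimal module that
 * attaches a probe to the block_split tracepoint defined earlier. The
 * register/unregister helpers are generated by the TRACE_EVENT() machinery;
 * this assumes CONFIG_TRACEPOINTS is enabled and that block_split is exported
 * to modules. The demo_* names are invented for the example.
 */
#include <linux/module.h>
#include <linux/tracepoint.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <trace/events/block.h>

/* Probe arguments mirror TP_PROTO(struct bio *bio, unsigned int new_sector),
 * with the registered data pointer prepended. */
static void demo_probe_block_split(void *ignore, struct bio *bio,
				   unsigned int new_sector)
{
	pr_info("block_split: %llu -> %u\n",
		(unsigned long long)bio->bi_iter.bi_sector, new_sector);
}

static int __init demo_split_probe_init(void)
{
	return register_trace_block_split(demo_probe_block_split, NULL);
}

static void __exit demo_split_probe_exit(void)
{
	unregister_trace_block_split(demo_probe_block_split, NULL);
	/* Wait for in-flight probes before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(demo_split_probe_init);
module_exit(demo_split_probe_exit);
MODULE_LICENSE("GPL");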
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2015, Marvell International Ltd. * * Inspired (hugely) by HCI LDISC implementation in Bluetooth.
* * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com> * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> /* TX states */ #define NCI_UART_SENDING 1 #define NCI_UART_TX_WAKEUP 2 static struct nci_uart *nci_uart_drivers[NCI_UART_DRIVER_MAX]; static inline struct sk_buff *nci_uart_dequeue(struct nci_uart *nu) { struct sk_buff *skb = nu->tx_skb; if (!skb) skb = skb_dequeue(&nu->tx_q); else nu->tx_skb = NULL; return skb; } static inline int nci_uart_queue_empty(struct nci_uart *nu) { if (nu->tx_skb) return 0; return skb_queue_empty(&nu->tx_q); } static int nci_uart_tx_wakeup(struct nci_uart *nu) { if (test_and_set_bit(NCI_UART_SENDING, &nu->tx_state)) { set_bit(NCI_UART_TX_WAKEUP, &nu->tx_state); return 0; } schedule_work(&nu->write_work); return 0; } static void nci_uart_write_work(struct work_struct *work) { struct nci_uart *nu = container_of(work, struct nci_uart, write_work); struct tty_struct *tty = nu->tty; struct sk_buff *skb; restart: clear_bit(NCI_UART_TX_WAKEUP, &nu->tx_state); if (nu->ops.tx_start) nu->ops.tx_start(nu); while ((skb = nci_uart_dequeue(nu))) { int len; set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); len = tty->ops->write(tty, skb->data, skb->len); skb_pull(skb, len); if (skb->len) { nu->tx_skb = skb; break; } kfree_skb(skb); } if (test_bit(NCI_UART_TX_WAKEUP, &nu->tx_state)) goto restart; if (nu->ops.tx_done && nci_uart_queue_empty(nu)) nu->ops.tx_done(nu); clear_bit(NCI_UART_SENDING, &nu->tx_state); } static int nci_uart_set_driver(struct tty_struct *tty, unsigned int driver) { struct nci_uart *nu = NULL; int ret; if (driver >= NCI_UART_DRIVER_MAX) return -EINVAL; if (!nci_uart_drivers[driver]) return -ENOENT; nu = kzalloc(sizeof(*nu), GFP_KERNEL); if (!nu) return -ENOMEM; memcpy(nu, nci_uart_drivers[driver], sizeof(struct nci_uart)); nu->tty = tty; tty->disc_data = nu; skb_queue_head_init(&nu->tx_q); INIT_WORK(&nu->write_work, nci_uart_write_work); spin_lock_init(&nu->rx_lock); ret = nu->ops.open(nu); if (ret) { tty->disc_data = NULL; kfree(nu); } else if (!try_module_get(nu->owner)) { nu->ops.close(nu); tty->disc_data = NULL; kfree(nu); return -ENOENT; } return ret; } /* ------ LDISC part ------ */ /* nci_uart_tty_open * * Called when line discipline changed to NCI_UART. * * Arguments: * tty pointer to tty info structure * Return Value: * 0 if success, otherwise error code */ static int nci_uart_tty_open(struct tty_struct *tty) { /* Error if the tty has no write op instead of leaving an exploitable * hole */ if (!tty->ops->write) return -EOPNOTSUPP; tty->disc_data = NULL; tty->receive_room = 65536; /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); return 0; } /* nci_uart_tty_close() * * Called when the line discipline is changed to something * else, the tty is closed, or the tty detects a hangup. 
*/ static void nci_uart_tty_close(struct tty_struct *tty) { struct nci_uart *nu = tty->disc_data; /* Detach from the tty */ tty->disc_data = NULL; if (!nu) return; kfree_skb(nu->tx_skb); kfree_skb(nu->rx_skb); skb_queue_purge(&nu->tx_q); nu->ops.close(nu); nu->tty = NULL; module_put(nu->owner); cancel_work_sync(&nu->write_work); kfree(nu); } /* nci_uart_tty_wakeup() * * Callback for transmit wakeup. Called when low level * device driver can accept more send data. * * Arguments: tty pointer to associated tty instance data * Return Value: None */ static void nci_uart_tty_wakeup(struct tty_struct *tty) { struct nci_uart *nu = tty->disc_data; if (!nu) return; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); if (tty != nu->tty) return; nci_uart_tx_wakeup(nu); } /* -- Default recv_buf handler -- * * This handler supposes that NCI frames are sent over UART link without any * framing. It reads NCI header, retrieve the packet size and once all packet * bytes are received it passes it to nci_uart driver for processing. */ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data, int count) { int chunk_len; if (!nu->ndev) { nfc_err(nu->tty->dev, "receive data from tty but no NCI dev is attached yet, drop buffer\n"); return 0; } /* Decode all incoming data in packets * and enqueue then for processing. */ while (count > 0) { /* If this is the first data of a packet, allocate a buffer */ if (!nu->rx_skb) { nu->rx_packet_len = -1; nu->rx_skb = nci_skb_alloc(nu->ndev, NCI_MAX_PACKET_SIZE, GFP_ATOMIC); if (!nu->rx_skb) return -ENOMEM; } /* Eat byte after byte till full packet header is received */ if (nu->rx_skb->len < NCI_CTRL_HDR_SIZE) { skb_put_u8(nu->rx_skb, *data++); --count; continue; } /* Header was received but packet len was not read */ if (nu->rx_packet_len < 0) nu->rx_packet_len = NCI_CTRL_HDR_SIZE + nci_plen(nu->rx_skb->data); /* Compute how many bytes are missing and how many bytes can * be consumed. */ chunk_len = nu->rx_packet_len - nu->rx_skb->len; if (count < chunk_len) chunk_len = count; skb_put_data(nu->rx_skb, data, chunk_len); data += chunk_len; count -= chunk_len; /* Check if packet is fully received */ if (nu->rx_packet_len == nu->rx_skb->len) { /* Pass RX packet to driver */ if (nu->ops.recv(nu, nu->rx_skb) != 0) nfc_err(nu->tty->dev, "corrupted RX packet\n"); /* Next packet will be a new one */ nu->rx_skb = NULL; } } return 0; } /* nci_uart_tty_receive() * * Called by tty low level driver when receive data is * available. * * Arguments: tty pointer to tty instance data * data pointer to received data * flags pointer to flags for data * count count of received data in bytes * * Return Value: None */ static void nci_uart_tty_receive(struct tty_struct *tty, const u8 *data, const u8 *flags, size_t count) { struct nci_uart *nu = tty->disc_data; if (!nu || tty != nu->tty) return; spin_lock(&nu->rx_lock); nci_uart_default_recv_buf(nu, data, count); spin_unlock(&nu->rx_lock); tty_unthrottle(tty); } /* nci_uart_tty_ioctl() * * Process IOCTL system call for the tty device. 
* * Arguments: * * tty pointer to tty instance data * cmd IOCTL command code * arg argument for IOCTL call (cmd dependent) * * Return Value: Command dependent */ static int nci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct nci_uart *nu = tty->disc_data; int err = 0; switch (cmd) { case NCIUARTSETDRIVER: if (!nu) return nci_uart_set_driver(tty, (unsigned int)arg); else return -EBUSY; break; default: err = n_tty_ioctl_helper(tty, cmd, arg); break; } return err; } /* We don't provide read/write/poll interface for user space. */ static ssize_t nci_uart_tty_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t nr, void **cookie, unsigned long offset) { return 0; } static ssize_t nci_uart_tty_write(struct tty_struct *tty, struct file *file, const u8 *data, size_t count) { return 0; } static int nci_uart_send(struct nci_uart *nu, struct sk_buff *skb) { /* Queue TX packet */ skb_queue_tail(&nu->tx_q, skb); /* Try to start TX (if possible) */ nci_uart_tx_wakeup(nu); return 0; } int nci_uart_register(struct nci_uart *nu) { if (!nu || !nu->ops.open || !nu->ops.recv || !nu->ops.close) return -EINVAL; /* Set the send callback */ nu->ops.send = nci_uart_send; /* Add this driver in the driver list */ if (nci_uart_drivers[nu->driver]) { pr_err("driver %d is already registered\n", nu->driver); return -EBUSY; } nci_uart_drivers[nu->driver] = nu; pr_info("NCI uart driver '%s [%d]' registered\n", nu->name, nu->driver); return 0; } EXPORT_SYMBOL_GPL(nci_uart_register); void nci_uart_unregister(struct nci_uart *nu) { pr_info("NCI uart driver '%s [%d]' unregistered\n", nu->name, nu->driver); /* Remove this driver from the driver list */ nci_uart_drivers[nu->driver] = NULL; } EXPORT_SYMBOL_GPL(nci_uart_unregister); void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl) { struct ktermios new_termios; if (!nu->tty) return; down_read(&nu->tty->termios_rwsem); new_termios = nu->tty->termios; up_read(&nu->tty->termios_rwsem); tty_termios_encode_baud_rate(&new_termios, baudrate, baudrate); if (flow_ctrl) new_termios.c_cflag |= CRTSCTS; else new_termios.c_cflag &= ~CRTSCTS; tty_set_termios(nu->tty, &new_termios); } EXPORT_SYMBOL_GPL(nci_uart_set_config); static struct tty_ldisc_ops nci_uart_ldisc = { .owner = THIS_MODULE, .num = N_NCI, .name = "n_nci", .open = nci_uart_tty_open, .close = nci_uart_tty_close, .read = nci_uart_tty_read, .write = nci_uart_tty_write, .receive_buf = nci_uart_tty_receive, .write_wakeup = nci_uart_tty_wakeup, .ioctl = nci_uart_tty_ioctl, .compat_ioctl = nci_uart_tty_ioctl, }; static int __init nci_uart_init(void) { return tty_register_ldisc(&nci_uart_ldisc); } static void __exit nci_uart_exit(void) { tty_unregister_ldisc(&nci_uart_ldisc); } module_init(nci_uart_init); module_exit(nci_uart_exit); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("NFC NCI UART driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_NCI);
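/*
 * Illustrative sketch (not from the original file): the rough shape of a
 * vendor driver that hooks into the line discipline above through
 * nci_uart_register(). The field and op layout is assumed to follow
 * struct nci_uart in <net/nfc/nci_core.h> as used by this file; the demo_*
 * names are invented, and reusing the NCI_UART_DRIVER_MARVELL slot is only
 * for the example.
 */
#include <linux/module.h>
#include <net/nfc/nci_core.h>

static int demo_nci_uart_open(struct nci_uart *nu)
{
	/* Allocate and register an nci_dev here and store it in nu->ndev. */
	return 0;
}

static void demo_nci_uart_close(struct nci_uart *nu)
{
	/* Unregister and free the nci_dev created in open(). */
}

static int demo_nci_uart_recv(struct nci_uart *nu, struct sk_buff *skb)
{
	/* Complete NCI packets reassembled by nci_uart_default_recv_buf()
	 * land here; hand them to the NCI core. */
	return nci_recv_frame(nu->ndev, skb);
}

static struct nci_uart demo_nci_uart = {
	.owner	= THIS_MODULE,
	.name	= "Demo NCI UART",
	.driver	= NCI_UART_DRIVER_MARVELL,
	.ops	= {
		.open	= demo_nci_uart_open,
		.close	= demo_nci_uart_close,
		.recv	= demo_nci_uart_recv,
	},
};

static int __init demo_nci_uart_init(void)
{
	return nci_uart_register(&demo_nci_uart);
}

static void __exit demo_nci_uart_exit(void)
{
	nci_uart_unregister(&demo_nci_uart);
}

module_init(demo_nci_uart_init);
module_exit(demo_nci_uart_exit);
MODULE_LICENSE("GPL");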
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hfsplus/extents.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handling of Extents both in catalog and extents overflow trees */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/pagemap.h> #include "hfsplus_fs.h" #include "hfsplus_raw.h" /* Compare two extents keys, returns 0 on same, pos/neg for difference */ int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1, const hfsplus_btree_key *k2) { __be32 k1id, k2id; __be32 k1s, k2s; k1id = k1->ext.cnid; k2id = k2->ext.cnid; if (k1id != k2id) return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1; if (k1->ext.fork_type != k2->ext.fork_type) return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1; k1s = k1->ext.start_block; k2s = k2->ext.start_block; if (k1s == k2s) return 0; return be32_to_cpu(k1s) < be32_to_cpu(k2s) ?
-1 : 1; } static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid, u32 block, u8 type) { key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2); key->ext.cnid = cpu_to_be32(cnid); key->ext.start_block = cpu_to_be32(block); key->ext.fork_type = type; key->ext.pad = 0; } static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off) { int i; u32 count; for (i = 0; i < 8; ext++, i++) { count = be32_to_cpu(ext->block_count); if (off < count) return be32_to_cpu(ext->start_block) + off; off -= count; } /* panic? */ return 0; } static int hfsplus_ext_block_count(struct hfsplus_extent *ext) { int i; u32 count = 0; for (i = 0; i < 8; ext++, i++) count += be32_to_cpu(ext->block_count); return count; } static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext) { int i; ext += 7; for (i = 0; i < 7; ext--, i++) if (ext->block_count) break; return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count); } static int __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd) { struct hfsplus_inode_info *hip = HFSPLUS_I(inode); int res; WARN_ON(!mutex_is_locked(&hip->extents_lock)); hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); res = hfs_brec_find(fd, hfs_find_rec_by_key); if (hip->extent_state & HFSPLUS_EXT_NEW) { if (res != -ENOENT) return res; /* Fail early and avoid ENOSPC during the btree operation */ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1); if (res) return res; hfs_brec_insert(fd, hip->cached_extents, sizeof(hfsplus_extent_rec)); hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); } else { if (res) return res; hfs_bnode_write(fd->bnode, hip->cached_extents, fd->entryoffset, fd->entrylength); hip->extent_state &= ~HFSPLUS_EXT_DIRTY; } /* * We can't just use hfsplus_mark_inode_dirty here, because we * also get called from hfsplus_write_inode, which should not * redirty the inode. Instead the callers have to be careful * to explicily mark the inode dirty, too. 
*/ set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags); return 0; } static int hfsplus_ext_write_extent_locked(struct inode *inode) { int res = 0; if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) { struct hfs_find_data fd; res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); if (res) return res; res = __hfsplus_ext_write_extent(inode, &fd); hfs_find_exit(&fd); } return res; } int hfsplus_ext_write_extent(struct inode *inode) { int res; mutex_lock(&HFSPLUS_I(inode)->extents_lock); res = hfsplus_ext_write_extent_locked(inode); mutex_unlock(&HFSPLUS_I(inode)->extents_lock); return res; } static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd, struct hfsplus_extent *extent, u32 cnid, u32 block, u8 type) { int res; hfsplus_ext_build_key(fd->search_key, cnid, block, type); fd->key->ext.cnid = 0; res = hfs_brec_find(fd, hfs_find_rec_by_key); if (res && res != -ENOENT) return res; if (fd->key->ext.cnid != fd->search_key->ext.cnid || fd->key->ext.fork_type != fd->search_key->ext.fork_type) return -ENOENT; if (fd->entrylength != sizeof(hfsplus_extent_rec)) return -EIO; hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfsplus_extent_rec)); return 0; } static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block) { struct hfsplus_inode_info *hip = HFSPLUS_I(inode); int res; WARN_ON(!mutex_is_locked(&hip->extents_lock)); if (hip->extent_state & HFSPLUS_EXT_DIRTY) { res = __hfsplus_ext_write_extent(inode, fd); if (res) return res; } res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino, block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); if (!res) { hip->cached_start = be32_to_cpu(fd->key->ext.start_block); hip->cached_blocks = hfsplus_ext_block_count(hip->cached_extents); } else { hip->cached_start = hip->cached_blocks = 0; hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); } return res; } static int hfsplus_ext_read_extent(struct inode *inode, u32 block) { struct hfsplus_inode_info *hip = HFSPLUS_I(inode); struct hfs_find_data fd; int res; if (block >= hip->cached_start && block < hip->cached_start + hip->cached_blocks) return 0; res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); if (!res) { res = __hfsplus_ext_cache_extent(&fd, inode, block); hfs_find_exit(&fd); } return res; } /* Get a block at iblock for inode, possibly allocating if create */ int hfsplus_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { struct super_block *sb = inode->i_sb; struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); struct hfsplus_inode_info *hip = HFSPLUS_I(inode); int res = -EIO; u32 ablock, dblock, mask; sector_t sector; int was_dirty = 0; /* Convert inode block to disk allocation block */ ablock = iblock >> sbi->fs_shift; if (iblock >= hip->fs_blocks) { if (!create) return 0; if (iblock > hip->fs_blocks) return -EIO; if (ablock >= hip->alloc_blocks) { res = hfsplus_file_extend(inode, false); if (res) return res; } } else create = 0; if (ablock < hip->first_blocks) { dblock = hfsplus_ext_find_block(hip->first_extents, ablock); goto done; } if (inode->i_ino == HFSPLUS_EXT_CNID) return -EIO; mutex_lock(&hip->extents_lock); /* * hfsplus_ext_read_extent will write out a cached extent into * the extents btree. In that case we may have to mark the inode * dirty even for a pure read of an extent here. 
*/ was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY); res = hfsplus_ext_read_extent(inode, ablock); if (res) { mutex_unlock(&hip->extents_lock); return -EIO; } dblock = hfsplus_ext_find_block(hip->cached_extents, ablock - hip->cached_start); mutex_unlock(&hip->extents_lock); done: hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock); mask = (1 << sbi->fs_shift) - 1; sector = ((sector_t)dblock << sbi->fs_shift) + sbi->blockoffset + (iblock & mask); map_bh(bh_result, sb, sector); if (create) { set_buffer_new(bh_result); hip->phys_size += sb->s_blocksize; hip->fs_blocks++; inode_add_bytes(inode, sb->s_blocksize); } if (create || was_dirty) mark_inode_dirty(inode); return 0; } static void hfsplus_dump_extent(struct hfsplus_extent *extent) { int i; hfs_dbg(EXTENT, " "); for (i = 0; i < 8; i++) hfs_dbg_cont(EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block), be32_to_cpu(extent[i].block_count)); hfs_dbg_cont(EXTENT, "\n"); } static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset, u32 alloc_block, u32 block_count) { u32 count, start; int i; hfsplus_dump_extent(extent); for (i = 0; i < 8; extent++, i++) { count = be32_to_cpu(extent->block_count); if (offset == count) { start = be32_to_cpu(extent->start_block); if (alloc_block != start + count) { if (++i >= 8) return -ENOSPC; extent++; extent->start_block = cpu_to_be32(alloc_block); } else block_count += count; extent->block_count = cpu_to_be32(block_count); return 0; } else if (offset < count) break; offset -= count; } /* panic? */ return -EIO; } static int hfsplus_free_extents(struct super_block *sb, struct hfsplus_extent *extent, u32 offset, u32 block_nr) { u32 count, start; int i; int err = 0; /* Mapping the allocation file may lock the extent tree */ WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock)); hfsplus_dump_extent(extent); for (i = 0; i < 8; extent++, i++) { count = be32_to_cpu(extent->block_count); if (offset == count) goto found; else if (offset < count) break; offset -= count; } /* panic? 
*/ return -EIO; found: for (;;) { start = be32_to_cpu(extent->start_block); if (count <= block_nr) { err = hfsplus_block_free(sb, start, count); if (err) { pr_err("can't free extent\n"); hfs_dbg(EXTENT, " start: %u count: %u\n", start, count); } extent->block_count = 0; extent->start_block = 0; block_nr -= count; } else { count -= block_nr; err = hfsplus_block_free(sb, start + count, block_nr); if (err) { pr_err("can't free extent\n"); hfs_dbg(EXTENT, " start: %u count: %u\n", start, count); } extent->block_count = cpu_to_be32(count); block_nr = 0; } if (!block_nr || !i) { /* * Try to free all extents and * return only last error */ return err; } i--; extent--; count = be32_to_cpu(extent->block_count); } } int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type) { struct hfs_find_data fd; hfsplus_extent_rec ext_entry; u32 total_blocks, blocks, start; int res, i; total_blocks = be32_to_cpu(fork->total_blocks); if (!total_blocks) return 0; blocks = 0; for (i = 0; i < 8; i++) blocks += be32_to_cpu(fork->extents[i].block_count); res = hfsplus_free_extents(sb, fork->extents, blocks, blocks); if (res) return res; if (total_blocks == blocks) return 0; res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); if (res) return res; do { res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid, total_blocks, type); if (res) break; start = be32_to_cpu(fd.key->ext.start_block); hfs_brec_remove(&fd); mutex_unlock(&fd.tree->tree_lock); hfsplus_free_extents(sb, ext_entry, total_blocks - start, total_blocks); total_blocks = start; mutex_lock_nested(&fd.tree->tree_lock, hfsplus_btree_lock_class(fd.tree)); } while (total_blocks > blocks); hfs_find_exit(&fd); return res; } int hfsplus_file_extend(struct inode *inode, bool zeroout) { struct super_block *sb = inode->i_sb; struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); struct hfsplus_inode_info *hip = HFSPLUS_I(inode); u32 start, len, goal; int res; if (sbi->alloc_file->i_size * 8 < sbi->total_blocks - sbi->free_blocks + 8) { /* extend alloc file */ pr_err_ratelimited("extend alloc file! 
(%llu,%u,%u)\n", sbi->alloc_file->i_size * 8, sbi->total_blocks, sbi->free_blocks); return -ENOSPC; } mutex_lock(&hip->extents_lock); if (hip->alloc_blocks == hip->first_blocks) goal = hfsplus_ext_lastblock(hip->first_extents); else { res = hfsplus_ext_read_extent(inode, hip->alloc_blocks); if (res) goto out; goal = hfsplus_ext_lastblock(hip->cached_extents); } len = hip->clump_blocks; start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len); if (start >= sbi->total_blocks) { start = hfsplus_block_allocate(sb, goal, 0, &len); if (start >= goal) { res = -ENOSPC; goto out; } } if (zeroout) { res = sb_issue_zeroout(sb, start, len, GFP_NOFS); if (res) goto out; } hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); if (hip->alloc_blocks <= hip->first_blocks) { if (!hip->first_blocks) { hfs_dbg(EXTENT, "first extents\n"); /* no extents yet */ hip->first_extents[0].start_block = cpu_to_be32(start); hip->first_extents[0].block_count = cpu_to_be32(len); res = 0; } else { /* try to append to extents in inode */ res = hfsplus_add_extent(hip->first_extents, hip->alloc_blocks, start, len); if (res == -ENOSPC) goto insert_extent; } if (!res) { hfsplus_dump_extent(hip->first_extents); hip->first_blocks += len; } } else { res = hfsplus_add_extent(hip->cached_extents, hip->alloc_blocks - hip->cached_start, start, len); if (!res) { hfsplus_dump_extent(hip->cached_extents); hip->extent_state |= HFSPLUS_EXT_DIRTY; hip->cached_blocks += len; } else if (res == -ENOSPC) goto insert_extent; } out: if (!res) { hip->alloc_blocks += len; mutex_unlock(&hip->extents_lock); hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY); return 0; } mutex_unlock(&hip->extents_lock); return res; insert_extent: hfs_dbg(EXTENT, "insert new extent\n"); res = hfsplus_ext_write_extent_locked(inode); if (res) goto out; memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); hip->cached_extents[0].start_block = cpu_to_be32(start); hip->cached_extents[0].block_count = cpu_to_be32(len); hfsplus_dump_extent(hip->cached_extents); hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW; hip->cached_start = hip->alloc_blocks; hip->cached_blocks = len; res = 0; goto out; } void hfsplus_file_truncate(struct inode *inode) { struct super_block *sb = inode->i_sb; struct hfsplus_inode_info *hip = HFSPLUS_I(inode); struct hfs_find_data fd; u32 alloc_cnt, blk_cnt, start; int res; hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n", inode->i_ino, (long long)hip->phys_size, inode->i_size); if (inode->i_size > hip->phys_size) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata = NULL; loff_t size = inode->i_size; res = hfsplus_write_begin(NULL, mapping, size, 0, &page, &fsdata); if (res) return; res = generic_write_end(NULL, mapping, size, 0, 0, page, fsdata); if (res < 0) return; mark_inode_dirty(inode); return; } else if (inode->i_size == hip->phys_size) return; blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >> HFSPLUS_SB(sb)->alloc_blksz_shift; mutex_lock(&hip->extents_lock); alloc_cnt = hip->alloc_blocks; if (blk_cnt == alloc_cnt) goto out_unlock; res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); if (res) { mutex_unlock(&hip->extents_lock); /* XXX: We lack error handling of hfsplus_file_truncate() */ return; } while (1) { if (alloc_cnt == hip->first_blocks) { mutex_unlock(&fd.tree->tree_lock); hfsplus_free_extents(sb, hip->first_extents, alloc_cnt, alloc_cnt - blk_cnt); hfsplus_dump_extent(hip->first_extents); hip->first_blocks = blk_cnt; mutex_lock_nested(&fd.tree->tree_lock, 
hfsplus_btree_lock_class(fd.tree)); break; } res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt); if (res) break; start = hip->cached_start; if (blk_cnt <= start) hfs_brec_remove(&fd); mutex_unlock(&fd.tree->tree_lock); hfsplus_free_extents(sb, hip->cached_extents, alloc_cnt - start, alloc_cnt - blk_cnt); hfsplus_dump_extent(hip->cached_extents); mutex_lock_nested(&fd.tree->tree_lock, hfsplus_btree_lock_class(fd.tree)); if (blk_cnt > start) { hip->extent_state |= HFSPLUS_EXT_DIRTY; break; } alloc_cnt = start; hip->cached_start = hip->cached_blocks = 0; hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); } hfs_find_exit(&fd); hip->alloc_blocks = blk_cnt; out_unlock: mutex_unlock(&hip->extents_lock); hip->phys_size = inode->i_size; hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits); hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY); }
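/*
 * Illustrative sketch (not part of the original file): the lookup that
 * hfsplus_ext_find_block() performs over one 8-slot extent record, redone in
 * user space with host-endian fields so it can be compiled and run on its
 * own. The sample record and the demo_* names are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_extent {
	uint32_t start_block;	/* first allocation block of the run */
	uint32_t block_count;	/* length of the run in allocation blocks */
};

/* Walk the record until the remaining offset falls inside a run. */
static uint32_t demo_find_block(const struct demo_extent *ext, uint32_t off)
{
	for (int i = 0; i < 8; ext++, i++) {
		if (off < ext->block_count)
			return ext->start_block + off;
		off -= ext->block_count;
	}
	return 0;	/* past the end of the cached record */
}

int main(void)
{
	/* A fragmented fork: 10 blocks at 100, then 5 blocks at 400. */
	struct demo_extent rec[8] = {
		{ .start_block = 100, .block_count = 10 },
		{ .start_block = 400, .block_count = 5 },
	};

	/* Prints 103 and 402: block 12 lands 2 blocks into the second run. */
	printf("file block 3  -> alloc block %u\n",
	       (unsigned)demo_find_block(rec, 3));
	printf("file block 12 -> alloc block %u\n",
	       (unsigned)demo_find_block(rec, 12));
	return 0;
}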
// SPDX-License-Identifier: GPL-2.0-or-later /* * uvc_entity.c -- USB Video Class driver * * Copyright (C) 2005-2011 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include "uvcvideo.h" static int uvc_mc_create_links(struct uvc_video_chain *chain, struct uvc_entity *entity) { const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; struct media_entity *sink; unsigned int i; int ret; sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING) ? (entity->vdev ? &entity->vdev->entity : NULL) : &entity->subdev.entity; if (sink == NULL) return 0; for (i = 0; i < entity->num_pads; ++i) { struct media_entity *source; struct uvc_entity *remote; u8 remote_pad; if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK)) continue; remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]); if (remote == NULL || remote->num_pads == 0) return -EINVAL; source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) ? (remote->vdev ? &remote->vdev->entity : NULL) : &remote->subdev.entity; if (source == NULL) continue; remote_pad = remote->num_pads - 1; ret = media_create_pad_link(source, remote_pad, sink, i, flags); if (ret < 0) return ret; } return 0; } static const struct v4l2_subdev_ops uvc_subdev_ops = { }; void uvc_mc_cleanup_entity(struct uvc_entity *entity) { if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) media_entity_cleanup(&entity->subdev.entity); else if (entity->vdev != NULL) media_entity_cleanup(&entity->vdev->entity); } static int uvc_mc_init_entity(struct uvc_video_chain *chain, struct uvc_entity *entity) { int ret; if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) { u32 function; v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops); strscpy(entity->subdev.name, entity->name, sizeof(entity->subdev.name)); switch (UVC_ENTITY_TYPE(entity)) { case UVC_VC_SELECTOR_UNIT: function = MEDIA_ENT_F_VID_MUX; break; case UVC_VC_PROCESSING_UNIT: case UVC_VC_EXTENSION_UNIT: /* For lack of a better option.
*/ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; break; case UVC_COMPOSITE_CONNECTOR: case UVC_COMPONENT_CONNECTOR: function = MEDIA_ENT_F_CONN_COMPOSITE; break; case UVC_SVIDEO_CONNECTOR: function = MEDIA_ENT_F_CONN_SVIDEO; break; case UVC_ITT_CAMERA: function = MEDIA_ENT_F_CAM_SENSOR; break; case UVC_TT_VENDOR_SPECIFIC: case UVC_ITT_VENDOR_SPECIFIC: case UVC_ITT_MEDIA_TRANSPORT_INPUT: case UVC_OTT_VENDOR_SPECIFIC: case UVC_OTT_DISPLAY: case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: case UVC_EXTERNAL_VENDOR_SPECIFIC: case UVC_EXT_GPIO_UNIT: default: function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; break; } entity->subdev.entity.function = function; ret = media_entity_pads_init(&entity->subdev.entity, entity->num_pads, entity->pads); if (ret < 0) return ret; ret = v4l2_device_register_subdev(&chain->dev->vdev, &entity->subdev); } else if (entity->vdev != NULL) { ret = media_entity_pads_init(&entity->vdev->entity, entity->num_pads, entity->pads); if (entity->flags & UVC_ENTITY_FLAG_DEFAULT) entity->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT; } else ret = 0; return ret; } int uvc_mc_register_entities(struct uvc_video_chain *chain) { struct uvc_entity *entity; int ret; list_for_each_entry(entity, &chain->entities, chain) { ret = uvc_mc_init_entity(chain, entity); if (ret < 0) { dev_info(&chain->dev->udev->dev, "Failed to initialize entity for entity %u\n", entity->id); return ret; } } list_for_each_entry(entity, &chain->entities, chain) { ret = uvc_mc_create_links(chain, entity); if (ret < 0) { dev_info(&chain->dev->udev->dev, "Failed to create links for entity %u\n", entity->id); return ret; } } return 0; }
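/*
 * Illustrative sketch (not part of the original file): a user-space loop that
 * enumerates the media entities which uvc_mc_register_entities() exposes,
 * using the MEDIA_IOC_ENUM_ENTITIES ioctl from <linux/media.h>. The
 * /dev/media0 path is an assumption for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media.h>

int main(void)
{
	struct media_entity_desc desc;
	int fd = open("/dev/media0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&desc, 0, sizeof(desc));
	desc.id = MEDIA_ENT_ID_FLAG_NEXT;	/* ask for the first entity */
	while (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &desc) == 0) {
		printf("entity %u: %s (type/function 0x%x, %u pads)\n",
		       desc.id, desc.name, desc.type, (unsigned)desc.pads);
		desc.id |= MEDIA_ENT_ID_FLAG_NEXT;	/* then the next one */
	}

	close(fd);
	return 0;
}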
// SPDX-License-Identifier: GPL-2.0+ /* * Procedures for creating, accessing and interpreting the device tree. * * Paul Mackerras August 1996. * Copyright (C) 1996-2005 Paul Mackerras. * * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. * {engebret|bergner}@us.ibm.com * * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net * * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and * Grant Likely. */ #define pr_fmt(fmt) "OF: " fmt #include <linux/cleanup.h> #include <linux/console.h> #include <linux/ctype.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/proc_fs.h> #include "of_private.h" LIST_HEAD(aliases_lookup); struct device_node *of_root; EXPORT_SYMBOL(of_root); struct device_node *of_chosen; EXPORT_SYMBOL(of_chosen); struct device_node *of_aliases; struct device_node *of_stdout; static const char *of_stdout_options; struct kset *of_kset; /* * Used to protect the of_aliases, to hold off addition of nodes to sysfs. * This mutex must be held whenever modifications are being made to the * device tree. The of_{attach,detach}_node() and * of_{add,remove,update}_property() helpers make sure this happens.
*/ DEFINE_MUTEX(of_mutex); /* use when traversing tree through the child, sibling, * or parent members of struct device_node. */ DEFINE_RAW_SPINLOCK(devtree_lock); bool of_node_name_eq(const struct device_node *np, const char *name) { const char *node_name; size_t len; if (!np) return false; node_name = kbasename(np->full_name); len = strchrnul(node_name, '@') - node_name; return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); } EXPORT_SYMBOL(of_node_name_eq); bool of_node_name_prefix(const struct device_node *np, const char *prefix) { if (!np) return false; return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; } EXPORT_SYMBOL(of_node_name_prefix); static bool __of_node_is_type(const struct device_node *np, const char *type) { const char *match = __of_get_property(np, "device_type", NULL); return np && match && type && !strcmp(match, type); } int of_bus_n_addr_cells(struct device_node *np) { u32 cells; for (; np; np = np->parent) if (!of_property_read_u32(np, "#address-cells", &cells)) return cells; /* No #address-cells property for the root node */ return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; } int of_n_addr_cells(struct device_node *np) { if (np->parent) np = np->parent; return of_bus_n_addr_cells(np); } EXPORT_SYMBOL(of_n_addr_cells); int of_bus_n_size_cells(struct device_node *np) { u32 cells; for (; np; np = np->parent) if (!of_property_read_u32(np, "#size-cells", &cells)) return cells; /* No #size-cells property for the root node */ return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; } int of_n_size_cells(struct device_node *np) { if (np->parent) np = np->parent; return of_bus_n_size_cells(np); } EXPORT_SYMBOL(of_n_size_cells); #ifdef CONFIG_NUMA int __weak of_node_to_nid(struct device_node *np) { return NUMA_NO_NODE; } #endif #define OF_PHANDLE_CACHE_BITS 7 #define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS) static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ]; static u32 of_phandle_cache_hash(phandle handle) { return hash_32(handle, OF_PHANDLE_CACHE_BITS); } /* * Caller must hold devtree_lock. 
*/ void __of_phandle_cache_inv_entry(phandle handle) { u32 handle_hash; struct device_node *np; if (!handle) return; handle_hash = of_phandle_cache_hash(handle); np = phandle_cache[handle_hash]; if (np && handle == np->phandle) phandle_cache[handle_hash] = NULL; } void __init of_core_init(void) { struct device_node *np; of_platform_register_reconfig_notifier(); /* Create the kset, and register existing nodes */ mutex_lock(&of_mutex); of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); if (!of_kset) { mutex_unlock(&of_mutex); pr_err("failed to register existing nodes\n"); return; } for_each_of_allnodes(np) { __of_attach_node_sysfs(np); if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)]) phandle_cache[of_phandle_cache_hash(np->phandle)] = np; } mutex_unlock(&of_mutex); /* Symlink in /proc as required by userspace ABI */ if (of_root) proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); } static struct property *__of_find_property(const struct device_node *np, const char *name, int *lenp) { struct property *pp; if (!np) return NULL; for (pp = np->properties; pp; pp = pp->next) { if (of_prop_cmp(pp->name, name) == 0) { if (lenp) *lenp = pp->length; break; } } return pp; } struct property *of_find_property(const struct device_node *np, const char *name, int *lenp) { struct property *pp; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); pp = __of_find_property(np, name, lenp); raw_spin_unlock_irqrestore(&devtree_lock, flags); return pp; } EXPORT_SYMBOL(of_find_property); struct device_node *__of_find_all_nodes(struct device_node *prev) { struct device_node *np; if (!prev) { np = of_root; } else if (prev->child) { np = prev->child; } else { /* Walk back up looking for a sibling, or the end of the structure */ np = prev; while (np->parent && !np->sibling) np = np->parent; np = np->sibling; /* Might be null at the end of the tree */ } return np; } /** * of_find_all_nodes - Get next node in global list * @prev: Previous node or NULL to start iteration * of_node_put() will be called on it * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_all_nodes(struct device_node *prev) { struct device_node *np; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); np = __of_find_all_nodes(prev); of_node_get(np); of_node_put(prev); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_find_all_nodes); /* * Find a property with a given name for a given node * and return the value. */ const void *__of_get_property(const struct device_node *np, const char *name, int *lenp) { struct property *pp = __of_find_property(np, name, lenp); return pp ? pp->value : NULL; } /* * Find a property with a given name for a given node * and return the value. */ const void *of_get_property(const struct device_node *np, const char *name, int *lenp) { struct property *pp = of_find_property(np, name, lenp); return pp ? pp->value : NULL; } EXPORT_SYMBOL(of_get_property); /** * __of_device_is_compatible() - Check if the node matches given constraints * @device: pointer to node * @compat: required compatible string, NULL or "" for any match * @type: required device_type value, NULL or "" for any match * @name: required node name, NULL or "" for any match * * Checks if the given @compat, @type and @name strings match the * properties of the given @device. A constraints can be skipped by * passing NULL or an empty string as the constraint. 
* * Returns 0 for no match, and a positive integer on match. The return * value is a relative score with larger values indicating better * matches. The score is weighted for the most specific compatible value * to get the highest score. Matching type is next, followed by matching * name. Practically speaking, this results in the following priority * order for matches: * * 1. specific compatible && type && name * 2. specific compatible && type * 3. specific compatible && name * 4. specific compatible * 5. general compatible && type && name * 6. general compatible && type * 7. general compatible && name * 8. general compatible * 9. type && name * 10. type * 11. name */ static int __of_device_is_compatible(const struct device_node *device, const char *compat, const char *type, const char *name) { struct property *prop; const char *cp; int index = 0, score = 0; /* Compatible match has highest priority */ if (compat && compat[0]) { prop = __of_find_property(device, "compatible", NULL); for (cp = of_prop_next_string(prop, NULL); cp; cp = of_prop_next_string(prop, cp), index++) { if (of_compat_cmp(cp, compat, strlen(compat)) == 0) { score = INT_MAX/2 - (index << 2); break; } } if (!score) return 0; } /* Matching type is better than matching name */ if (type && type[0]) { if (!__of_node_is_type(device, type)) return 0; score += 2; } /* Matching name is a bit better than not */ if (name && name[0]) { if (!of_node_name_eq(device, name)) return 0; score++; } return score; } /** Checks if the given "compat" string matches one of the strings in * the device's "compatible" property */ int of_device_is_compatible(const struct device_node *device, const char *compat) { unsigned long flags; int res; raw_spin_lock_irqsave(&devtree_lock, flags); res = __of_device_is_compatible(device, compat, NULL, NULL); raw_spin_unlock_irqrestore(&devtree_lock, flags); return res; } EXPORT_SYMBOL(of_device_is_compatible); /** Checks if the device is compatible with any of the entries in * a NULL terminated array of strings. Returns the best match * score or 0. */ int of_device_compatible_match(const struct device_node *device, const char *const *compat) { unsigned int tmp, score = 0; if (!compat) return 0; while (*compat) { tmp = of_device_is_compatible(device, *compat); if (tmp > score) score = tmp; compat++; } return score; } EXPORT_SYMBOL_GPL(of_device_compatible_match); /** * of_machine_compatible_match - Test root of device tree against a compatible array * @compats: NULL terminated array of compatible strings to look for in root node's compatible property. * * Returns true if the root node has any of the given compatible values in its * compatible property. 
*/ bool of_machine_compatible_match(const char *const *compats) { struct device_node *root; int rc = 0; root = of_find_node_by_path("/"); if (root) { rc = of_device_compatible_match(root, compats); of_node_put(root); } return rc != 0; } EXPORT_SYMBOL(of_machine_compatible_match); static bool __of_device_is_status(const struct device_node *device, const char * const*strings) { const char *status; int statlen; if (!device) return false; status = __of_get_property(device, "status", &statlen); if (status == NULL) return false; if (statlen > 0) { while (*strings) { unsigned int len = strlen(*strings); if ((*strings)[len - 1] == '-') { if (!strncmp(status, *strings, len)) return true; } else { if (!strcmp(status, *strings)) return true; } strings++; } } return false; } /** * __of_device_is_available - check if a device is available for use * * @device: Node to check for availability, with locks already held * * Return: True if the status property is absent or set to "okay" or "ok", * false otherwise */ static bool __of_device_is_available(const struct device_node *device) { static const char * const ok[] = {"okay", "ok", NULL}; if (!device) return false; return !__of_get_property(device, "status", NULL) || __of_device_is_status(device, ok); } /** * __of_device_is_reserved - check if a device is reserved * * @device: Node to check for availability, with locks already held * * Return: True if the status property is set to "reserved", false otherwise */ static bool __of_device_is_reserved(const struct device_node *device) { static const char * const reserved[] = {"reserved", NULL}; return __of_device_is_status(device, reserved); } /** * of_device_is_available - check if a device is available for use * * @device: Node to check for availability * * Return: True if the status property is absent or set to "okay" or "ok", * false otherwise */ bool of_device_is_available(const struct device_node *device) { unsigned long flags; bool res; raw_spin_lock_irqsave(&devtree_lock, flags); res = __of_device_is_available(device); raw_spin_unlock_irqrestore(&devtree_lock, flags); return res; } EXPORT_SYMBOL(of_device_is_available); /** * __of_device_is_fail - check if a device has status "fail" or "fail-..." * * @device: Node to check status for, with locks already held * * Return: True if the status property is set to "fail" or "fail-..." (for any * error code suffix), false otherwise */ static bool __of_device_is_fail(const struct device_node *device) { static const char * const fail[] = {"fail", "fail-", NULL}; return __of_device_is_status(device, fail); } /** * of_device_is_big_endian - check if a device has BE registers * * @device: Node to check for endianness * * Return: True if the device has a "big-endian" property, or if the kernel * was compiled for BE *and* the device has a "native-endian" property. * Returns false otherwise. * * Callers would nominally use ioread32be/iowrite32be if * of_device_is_big_endian() == true, or readl/writel otherwise. */ bool of_device_is_big_endian(const struct device_node *device) { if (of_property_read_bool(device, "big-endian")) return true; if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && of_property_read_bool(device, "native-endian")) return true; return false; } EXPORT_SYMBOL(of_device_is_big_endian); /** * of_get_parent - Get a node's parent if any * @node: Node to get parent * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. 
*/ struct device_node *of_get_parent(const struct device_node *node) { struct device_node *np; unsigned long flags; if (!node) return NULL; raw_spin_lock_irqsave(&devtree_lock, flags); np = of_node_get(node->parent); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_get_parent); /** * of_get_next_parent - Iterate to a node's parent * @node: Node to get parent of * * This is like of_get_parent() except that it drops the * refcount on the passed node, making it suitable for iterating * through a node's parents. * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_get_next_parent(struct device_node *node) { struct device_node *parent; unsigned long flags; if (!node) return NULL; raw_spin_lock_irqsave(&devtree_lock, flags); parent = of_node_get(node->parent); of_node_put(node); raw_spin_unlock_irqrestore(&devtree_lock, flags); return parent; } EXPORT_SYMBOL(of_get_next_parent); static struct device_node *__of_get_next_child(const struct device_node *node, struct device_node *prev) { struct device_node *next; if (!node) return NULL; next = prev ? prev->sibling : node->child; of_node_get(next); of_node_put(prev); return next; } #define __for_each_child_of_node(parent, child) \ for (child = __of_get_next_child(parent, NULL); child != NULL; \ child = __of_get_next_child(parent, child)) /** * of_get_next_child - Iterate a node childs * @node: parent node * @prev: previous child of the parent node, or NULL to get first * * Return: A node pointer with refcount incremented, use of_node_put() on * it when done. Returns NULL when prev is the last child. Decrements the * refcount of prev. */ struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev) { struct device_node *next; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); next = __of_get_next_child(node, prev); raw_spin_unlock_irqrestore(&devtree_lock, flags); return next; } EXPORT_SYMBOL(of_get_next_child); static struct device_node *of_get_next_status_child(const struct device_node *node, struct device_node *prev, bool (*checker)(const struct device_node *)) { struct device_node *next; unsigned long flags; if (!node) return NULL; raw_spin_lock_irqsave(&devtree_lock, flags); next = prev ? prev->sibling : node->child; for (; next; next = next->sibling) { if (!checker(next)) continue; if (of_node_get(next)) break; } of_node_put(prev); raw_spin_unlock_irqrestore(&devtree_lock, flags); return next; } /** * of_get_next_available_child - Find the next available child node * @node: parent node * @prev: previous child of the parent node, or NULL to get first * * This function is like of_get_next_child(), except that it * automatically skips any disabled nodes (i.e. status = "disabled"). */ struct device_node *of_get_next_available_child(const struct device_node *node, struct device_node *prev) { return of_get_next_status_child(node, prev, __of_device_is_available); } EXPORT_SYMBOL(of_get_next_available_child); /** * of_get_next_reserved_child - Find the next reserved child node * @node: parent node * @prev: previous child of the parent node, or NULL to get first * * This function is like of_get_next_child(), except that it * automatically skips any disabled nodes (i.e. status = "disabled"). 
*/ struct device_node *of_get_next_reserved_child(const struct device_node *node, struct device_node *prev) { return of_get_next_status_child(node, prev, __of_device_is_reserved); } EXPORT_SYMBOL(of_get_next_reserved_child); /** * of_get_next_cpu_node - Iterate on cpu nodes * @prev: previous child of the /cpus node, or NULL to get first * * Unusable CPUs (those with the status property set to "fail" or "fail-...") * will be skipped. * * Return: A cpu node pointer with refcount incremented, use of_node_put() * on it when done. Returns NULL when prev is the last child. Decrements * the refcount of prev. */ struct device_node *of_get_next_cpu_node(struct device_node *prev) { struct device_node *next = NULL; unsigned long flags; struct device_node *node; if (!prev) node = of_find_node_by_path("/cpus"); raw_spin_lock_irqsave(&devtree_lock, flags); if (prev) next = prev->sibling; else if (node) { next = node->child; of_node_put(node); } for (; next; next = next->sibling) { if (__of_device_is_fail(next)) continue; if (!(of_node_name_eq(next, "cpu") || __of_node_is_type(next, "cpu"))) continue; if (of_node_get(next)) break; } of_node_put(prev); raw_spin_unlock_irqrestore(&devtree_lock, flags); return next; } EXPORT_SYMBOL(of_get_next_cpu_node); /** * of_get_compatible_child - Find compatible child node * @parent: parent node * @compatible: compatible string * * Lookup child node whose compatible property contains the given compatible * string. * * Return: a node pointer with refcount incremented, use of_node_put() on it * when done; or NULL if not found. */ struct device_node *of_get_compatible_child(const struct device_node *parent, const char *compatible) { struct device_node *child; for_each_child_of_node(parent, child) { if (of_device_is_compatible(child, compatible)) break; } return child; } EXPORT_SYMBOL(of_get_compatible_child); /** * of_get_child_by_name - Find the child node by name for a given parent * @node: parent node * @name: child name to look for. * * This function looks for child node for given matching name * * Return: A node pointer if found, with refcount incremented, use * of_node_put() on it when done. * Returns NULL if node is not found. */ struct device_node *of_get_child_by_name(const struct device_node *node, const char *name) { struct device_node *child; for_each_child_of_node(node, child) if (of_node_name_eq(child, name)) break; return child; } EXPORT_SYMBOL(of_get_child_by_name); struct device_node *__of_find_node_by_path(struct device_node *parent, const char *path) { struct device_node *child; int len; len = strcspn(path, "/:"); if (!len) return NULL; __for_each_child_of_node(parent, child) { const char *name = kbasename(child->full_name); if (strncmp(path, name, len) == 0 && (strlen(name) == len)) return child; } return NULL; } struct device_node *__of_find_node_by_full_path(struct device_node *node, const char *path) { const char *separator = strchr(path, ':'); while (node && *path == '/') { struct device_node *tmp = node; path++; /* Increment past '/' delimiter */ node = __of_find_node_by_path(node, path); of_node_put(tmp); path = strchrnul(path, '/'); if (separator && separator < path) break; } return node; } /** * of_find_node_opts_by_path - Find a node matching a full OF path * @path: Either the full path to match, or if the path does not * start with '/', the name of a property of the /aliases * node (an alias). In the case of an alias, the node * matching the alias' value will be returned. 
* @opts: Address of a pointer into which to store the start of * an options string appended to the end of the path with * a ':' separator. * * Valid paths: * * /foo/bar Full path * * foo Valid alias * * foo/bar Valid alias + relative path * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_opts_by_path(const char *path, const char **opts) { struct device_node *np = NULL; struct property *pp; unsigned long flags; const char *separator = strchr(path, ':'); if (opts) *opts = separator ? separator + 1 : NULL; if (strcmp(path, "/") == 0) return of_node_get(of_root); /* The path could begin with an alias */ if (*path != '/') { int len; const char *p = separator; if (!p) p = strchrnul(path, '/'); len = p - path; /* of_aliases must not be NULL */ if (!of_aliases) return NULL; for_each_property_of_node(of_aliases, pp) { if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) { np = of_find_node_by_path(pp->value); break; } } if (!np) return NULL; path = p; } /* Step down the tree matching path components */ raw_spin_lock_irqsave(&devtree_lock, flags); if (!np) np = of_node_get(of_root); np = __of_find_node_by_full_path(np, path); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_find_node_opts_by_path); /** * of_find_node_by_name - Find a node by its "name" property * @from: The node to start searching from or NULL; the node * you pass will not be searched, only the next one * will. Typically, you pass what the previous call * returned. of_node_put() will be called on @from. * @name: The name string to match against * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_by_name(struct device_node *from, const char *name) { struct device_node *np; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); for_each_of_allnodes_from(from, np) if (of_node_name_eq(np, name) && of_node_get(np)) break; of_node_put(from); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_find_node_by_name); /** * of_find_node_by_type - Find a node by its "device_type" property * @from: The node to start searching from, or NULL to start searching * the entire device tree. The node you pass will not be * searched, only the next one will; typically, you pass * what the previous call returned. of_node_put() will be * called on from for you. * @type: The type string to match against * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_by_type(struct device_node *from, const char *type) { struct device_node *np; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); for_each_of_allnodes_from(from, np) if (__of_node_is_type(np, type) && of_node_get(np)) break; of_node_put(from); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_find_node_by_type); /** * of_find_compatible_node - Find a node based on type and one of the * tokens in its "compatible" property * @from: The node to start searching from or NULL, the node * you pass will not be searched, only the next one * will; typically, you pass what the previous call * returned. of_node_put() will be called on it * @type: The type string to match "device_type" or NULL to ignore * @compatible: The string to match to one of the tokens in the device * "compatible" list. 
* * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_compatible_node(struct device_node *from, const char *type, const char *compatible) { struct device_node *np; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); for_each_of_allnodes_from(from, np) if (__of_device_is_compatible(np, compatible, type, NULL) && of_node_get(np)) break; of_node_put(from); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_find_compatible_node); /** * of_find_node_with_property - Find a node which has a property with * the given name. * @from: The node to start searching from or NULL, the node * you pass will not be searched, only the next one * will; typically, you pass what the previous call * returned. of_node_put() will be called on it * @prop_name: The name of the property to look for. * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_with_property(struct device_node *from, const char *prop_name) { struct device_node *np; struct property *pp; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); for_each_of_allnodes_from(from, np) { for (pp = np->properties; pp; pp = pp->next) { if (of_prop_cmp(pp->name, prop_name) == 0) { of_node_get(np); goto out; } } } out: of_node_put(from); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_find_node_with_property); static const struct of_device_id *__of_match_node(const struct of_device_id *matches, const struct device_node *node) { const struct of_device_id *best_match = NULL; int score, best_score = 0; if (!matches) return NULL; for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) { score = __of_device_is_compatible(node, matches->compatible, matches->type, matches->name); if (score > best_score) { best_match = matches; best_score = score; } } return best_match; } /** * of_match_node - Tell if a device_node has a matching of_match structure * @matches: array of of device match structures to search in * @node: the of device structure to match against * * Low level utility function used by device matching. */ const struct of_device_id *of_match_node(const struct of_device_id *matches, const struct device_node *node) { const struct of_device_id *match; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); match = __of_match_node(matches, node); raw_spin_unlock_irqrestore(&devtree_lock, flags); return match; } EXPORT_SYMBOL(of_match_node); /** * of_find_matching_node_and_match - Find a node based on an of_device_id * match table. * @from: The node to start searching from or NULL, the node * you pass will not be searched, only the next one * will; typically, you pass what the previous call * returned. of_node_put() will be called on it * @matches: array of of device match structures to search in * @match: Updated to point at the matches entry which matched * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. 
*/ struct device_node *of_find_matching_node_and_match(struct device_node *from, const struct of_device_id *matches, const struct of_device_id **match) { struct device_node *np; const struct of_device_id *m; unsigned long flags; if (match) *match = NULL; raw_spin_lock_irqsave(&devtree_lock, flags); for_each_of_allnodes_from(from, np) { m = __of_match_node(matches, np); if (m && of_node_get(np)) { if (match) *match = m; break; } } of_node_put(from); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_find_matching_node_and_match); /** * of_alias_from_compatible - Lookup appropriate alias for a device node * depending on compatible * @node: pointer to a device tree node * @alias: Pointer to buffer that alias value will be copied into * @len: Length of alias value * * Based on the value of the compatible property, this routine will attempt * to choose an appropriate alias value for a particular device tree node. * It does this by stripping the manufacturer prefix (as delimited by a ',') * from the first entry in the compatible list property. * * Note: The matching on just the "product" side of the compatible is a relic * from I2C and SPI. Please do not add any new user. * * Return: This routine returns 0 on success, <0 on failure. */ int of_alias_from_compatible(const struct device_node *node, char *alias, int len) { const char *compatible, *p; int cplen; compatible = of_get_property(node, "compatible", &cplen); if (!compatible || strlen(compatible) > cplen) return -ENODEV; p = strchr(compatible, ','); strscpy(alias, p ? p + 1 : compatible, len); return 0; } EXPORT_SYMBOL_GPL(of_alias_from_compatible); /** * of_find_node_by_phandle - Find a node given a phandle * @handle: phandle of the node to find * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_by_phandle(phandle handle) { struct device_node *np = NULL; unsigned long flags; u32 handle_hash; if (!handle) return NULL; handle_hash = of_phandle_cache_hash(handle); raw_spin_lock_irqsave(&devtree_lock, flags); if (phandle_cache[handle_hash] && handle == phandle_cache[handle_hash]->phandle) np = phandle_cache[handle_hash]; if (!np) { for_each_of_allnodes(np) if (np->phandle == handle && !of_node_check_flag(np, OF_DETACHED)) { phandle_cache[handle_hash] = np; break; } } of_node_get(np); raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } EXPORT_SYMBOL(of_find_node_by_phandle); void of_print_phandle_args(const char *msg, const struct of_phandle_args *args) { int i; printk("%s %pOF", msg, args->np); for (i = 0; i < args->args_count; i++) { const char delim = i ? ',' : ':'; pr_cont("%c%08x", delim, args->args[i]); } pr_cont("\n"); } int of_phandle_iterator_init(struct of_phandle_iterator *it, const struct device_node *np, const char *list_name, const char *cells_name, int cell_count) { const __be32 *list; int size; memset(it, 0, sizeof(*it)); /* * one of cell_count or cells_name must be provided to determine the * argument length. 
*/ if (cell_count < 0 && !cells_name) return -EINVAL; list = of_get_property(np, list_name, &size); if (!list) return -ENOENT; it->cells_name = cells_name; it->cell_count = cell_count; it->parent = np; it->list_end = list + size / sizeof(*list); it->phandle_end = list; it->cur = list; return 0; } EXPORT_SYMBOL_GPL(of_phandle_iterator_init); int of_phandle_iterator_next(struct of_phandle_iterator *it) { uint32_t count = 0; if (it->node) { of_node_put(it->node); it->node = NULL; } if (!it->cur || it->phandle_end >= it->list_end) return -ENOENT; it->cur = it->phandle_end; /* If phandle is 0, then it is an empty entry with no arguments. */ it->phandle = be32_to_cpup(it->cur++); if (it->phandle) { /* * Find the provider node and parse the #*-cells property to * determine the argument length. */ it->node = of_find_node_by_phandle(it->phandle); if (it->cells_name) { if (!it->node) { pr_err("%pOF: could not find phandle %d\n", it->parent, it->phandle); goto err; } if (of_property_read_u32(it->node, it->cells_name, &count)) { /* * If both cell_count and cells_name is given, * fall back to cell_count in absence * of the cells_name property */ if (it->cell_count >= 0) { count = it->cell_count; } else { pr_err("%pOF: could not get %s for %pOF\n", it->parent, it->cells_name, it->node); goto err; } } } else { count = it->cell_count; } /* * Make sure that the arguments actually fit in the remaining * property data length */ if (it->cur + count > it->list_end) { if (it->cells_name) pr_err("%pOF: %s = %d found %td\n", it->parent, it->cells_name, count, it->list_end - it->cur); else pr_err("%pOF: phandle %s needs %d, found %td\n", it->parent, of_node_full_name(it->node), count, it->list_end - it->cur); goto err; } } it->phandle_end = it->cur + count; it->cur_count = count; return 0; err: if (it->node) { of_node_put(it->node); it->node = NULL; } return -EINVAL; } EXPORT_SYMBOL_GPL(of_phandle_iterator_next); int of_phandle_iterator_args(struct of_phandle_iterator *it, uint32_t *args, int size) { int i, count; count = it->cur_count; if (WARN_ON(size < count)) count = size; for (i = 0; i < count; i++) args[i] = be32_to_cpup(it->cur++); return count; } int __of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int cell_count, int index, struct of_phandle_args *out_args) { struct of_phandle_iterator it; int rc, cur_index = 0; if (index < 0) return -EINVAL; /* Loop over the phandles until all the requested entry is found */ of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) { /* * All of the error cases bail out of the loop, so at * this point, the parsing is successful. If the requested * index matches, then fill the out_args structure and return, * or return -ENOENT for an empty entry. */ rc = -ENOENT; if (cur_index == index) { if (!it.phandle) goto err; if (out_args) { int c; c = of_phandle_iterator_args(&it, out_args->args, MAX_PHANDLE_ARGS); out_args->np = it.node; out_args->args_count = c; } else { of_node_put(it.node); } /* Found it! 
return success */ return 0; } cur_index++; } /* * Unlock node before returning result; will be one of: * -ENOENT : index is for empty phandle * -EINVAL : parsing error on data */ err: of_node_put(it.node); return rc; } EXPORT_SYMBOL(__of_parse_phandle_with_args); /** * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @stem_name: stem of property names that specify phandles' arguments count * @index: index of a phandle to parse out * @out_args: optional pointer to output arguments structure (will be filled) * * This function is useful to parse lists of phandles and their arguments. * Returns 0 on success and fills out_args, on error returns appropriate errno * value. The difference between this function and of_parse_phandle_with_args() * is that this API remaps a phandle if the node the phandle points to has * a <@stem_name>-map property. * * Caller is responsible to call of_node_put() on the returned out_args->np * pointer. * * Example:: * * phandle1: node1 { * #list-cells = <2>; * }; * * phandle2: node2 { * #list-cells = <1>; * }; * * phandle3: node3 { * #list-cells = <1>; * list-map = <0 &phandle2 3>, * <1 &phandle2 2>, * <2 &phandle1 5 1>; * list-map-mask = <0x3>; * }; * * node4 { * list = <&phandle1 1 2 &phandle3 0>; * }; * * To get a device_node of the ``node2`` node you may call this: * of_parse_phandle_with_args(node4, "list", "list", 1, &args); */ int of_parse_phandle_with_args_map(const struct device_node *np, const char *list_name, const char *stem_name, int index, struct of_phandle_args *out_args) { char *cells_name __free(kfree) = kasprintf(GFP_KERNEL, "#%s-cells", stem_name); char *map_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map", stem_name); char *mask_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name); char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name); struct device_node *cur, *new = NULL; const __be32 *map, *mask, *pass; static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) }; static const __be32 dummy_pass[] = { [0 ... 
MAX_PHANDLE_ARGS] = cpu_to_be32(0) }; __be32 initial_match_array[MAX_PHANDLE_ARGS]; const __be32 *match_array = initial_match_array; int i, ret, map_len, match; u32 list_size, new_size; if (index < 0) return -EINVAL; if (!cells_name || !map_name || !mask_name || !pass_name) return -ENOMEM; ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index, out_args); if (ret) return ret; /* Get the #<list>-cells property */ cur = out_args->np; ret = of_property_read_u32(cur, cells_name, &list_size); if (ret < 0) goto put; /* Precalculate the match array - this simplifies match loop */ for (i = 0; i < list_size; i++) initial_match_array[i] = cpu_to_be32(out_args->args[i]); ret = -EINVAL; while (cur) { /* Get the <list>-map property */ map = of_get_property(cur, map_name, &map_len); if (!map) { return 0; } map_len /= sizeof(u32); /* Get the <list>-map-mask property (optional) */ mask = of_get_property(cur, mask_name, NULL); if (!mask) mask = dummy_mask; /* Iterate through <list>-map property */ match = 0; while (map_len > (list_size + 1) && !match) { /* Compare specifiers */ match = 1; for (i = 0; i < list_size; i++, map_len--) match &= !((match_array[i] ^ *map++) & mask[i]); of_node_put(new); new = of_find_node_by_phandle(be32_to_cpup(map)); map++; map_len--; /* Check if not found */ if (!new) goto put; if (!of_device_is_available(new)) match = 0; ret = of_property_read_u32(new, cells_name, &new_size); if (ret) goto put; /* Check for malformed properties */ if (WARN_ON(new_size > MAX_PHANDLE_ARGS)) goto put; if (map_len < new_size) goto put; /* Move forward by new node's #<list>-cells amount */ map += new_size; map_len -= new_size; } if (!match) goto put; /* Get the <list>-map-pass-thru property (optional) */ pass = of_get_property(cur, pass_name, NULL); if (!pass) pass = dummy_pass; /* * Successfully parsed a <list>-map translation; copy new * specifier into the out_args structure, keeping the * bits specified in <list>-map-pass-thru. */ match_array = map - new_size; for (i = 0; i < new_size; i++) { __be32 val = *(map - new_size + i); if (i < list_size) { val &= ~pass[i]; val |= cpu_to_be32(out_args->args[i]) & pass[i]; } out_args->args[i] = be32_to_cpu(val); } out_args->args_count = list_size = new_size; /* Iterate again with new provider */ out_args->np = new; of_node_put(cur); cur = new; new = NULL; } put: of_node_put(cur); of_node_put(new); return ret; } EXPORT_SYMBOL(of_parse_phandle_with_args_map); /** * of_count_phandle_with_args() - Find the number of phandles references in a property * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @cells_name: property name that specifies phandles' arguments count * * Return: The number of phandle + argument tuples within a property. It * is a typical pattern to encode a list of phandle and variable * arguments into a single property. The number of arguments is encoded * by a property in the phandle-target node. For example, a gpios * property would contain a list of GPIO specifies consisting of a * phandle and 1 or more arguments. The number of arguments are * determined by the #gpio-cells property in the node pointed to by the * phandle. */ int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name) { struct of_phandle_iterator it; int rc, cur_index = 0; /* * If cells_name is NULL we assume a cell count of 0. This makes * counting the phandles trivial as each 32bit word in the list is a * phandle and no arguments are to consider. 
So we don't iterate through * the list but just use the length to determine the phandle count. */ if (!cells_name) { const __be32 *list; int size; list = of_get_property(np, list_name, &size); if (!list) return -ENOENT; return size / sizeof(*list); } rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1); if (rc) return rc; while ((rc = of_phandle_iterator_next(&it)) == 0) cur_index += 1; if (rc != -ENOENT) return rc; return cur_index; } EXPORT_SYMBOL(of_count_phandle_with_args); static struct property *__of_remove_property_from_list(struct property **list, struct property *prop) { struct property **next; for (next = list; *next; next = &(*next)->next) { if (*next == prop) { *next = prop->next; prop->next = NULL; return prop; } } return NULL; } /** * __of_add_property - Add a property to a node without lock operations * @np: Caller's Device Node * @prop: Property to add */ int __of_add_property(struct device_node *np, struct property *prop) { int rc = 0; unsigned long flags; struct property **next; raw_spin_lock_irqsave(&devtree_lock, flags); __of_remove_property_from_list(&np->deadprops, prop); prop->next = NULL; next = &np->properties; while (*next) { if (strcmp(prop->name, (*next)->name) == 0) { /* duplicate ! don't insert it */ rc = -EEXIST; goto out_unlock; } next = &(*next)->next; } *next = prop; out_unlock: raw_spin_unlock_irqrestore(&devtree_lock, flags); if (rc) return rc; __of_add_property_sysfs(np, prop); return 0; } /** * of_add_property - Add a property to a node * @np: Caller's Device Node * @prop: Property to add */ int of_add_property(struct device_node *np, struct property *prop) { int rc; mutex_lock(&of_mutex); rc = __of_add_property(np, prop); mutex_unlock(&of_mutex); if (!rc) of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL); return rc; } EXPORT_SYMBOL_GPL(of_add_property); int __of_remove_property(struct device_node *np, struct property *prop) { unsigned long flags; int rc = -ENODEV; raw_spin_lock_irqsave(&devtree_lock, flags); if (__of_remove_property_from_list(&np->properties, prop)) { /* Found the property, add it to deadprops list */ prop->next = np->deadprops; np->deadprops = prop; rc = 0; } raw_spin_unlock_irqrestore(&devtree_lock, flags); if (rc) return rc; __of_remove_property_sysfs(np, prop); return 0; } /** * of_remove_property - Remove a property from a node. * @np: Caller's Device Node * @prop: Property to remove * * Note that we don't actually remove it, since we have given out * who-knows-how-many pointers to the data using get-property. * Instead we just move the property to the "dead properties" * list, so it won't be found any more. 
*/ int of_remove_property(struct device_node *np, struct property *prop) { int rc; if (!prop) return -ENODEV; mutex_lock(&of_mutex); rc = __of_remove_property(np, prop); mutex_unlock(&of_mutex); if (!rc) of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL); return rc; } EXPORT_SYMBOL_GPL(of_remove_property); int __of_update_property(struct device_node *np, struct property *newprop, struct property **oldpropp) { struct property **next, *oldprop; unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); __of_remove_property_from_list(&np->deadprops, newprop); for (next = &np->properties; *next; next = &(*next)->next) { if (of_prop_cmp((*next)->name, newprop->name) == 0) break; } *oldpropp = oldprop = *next; if (oldprop) { /* replace the node */ newprop->next = oldprop->next; *next = newprop; oldprop->next = np->deadprops; np->deadprops = oldprop; } else { /* new node */ newprop->next = NULL; *next = newprop; } raw_spin_unlock_irqrestore(&devtree_lock, flags); __of_update_property_sysfs(np, newprop, oldprop); return 0; } /* * of_update_property - Update a property in a node; if the property does * not exist, add it. * * Note that we don't actually remove it, since we have given out * who-knows-how-many pointers to the data using get-property. * Instead we just move the property to the "dead properties" list, * and add the new property to the property list */ int of_update_property(struct device_node *np, struct property *newprop) { struct property *oldprop; int rc; if (!newprop->name) return -EINVAL; mutex_lock(&of_mutex); rc = __of_update_property(np, newprop, &oldprop); mutex_unlock(&of_mutex); if (!rc) of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop); return rc; } static void of_alias_add(struct alias_prop *ap, struct device_node *np, int id, const char *stem, int stem_len) { ap->np = np; ap->id = id; strscpy(ap->stem, stem, stem_len + 1); list_add_tail(&ap->link, &aliases_lookup); pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n", ap->alias, ap->stem, ap->id, np); } /** * of_alias_scan - Scan all properties of the 'aliases' node * @dt_alloc: An allocator that provides a virtual address to memory * for storing the resulting tree * * The function scans all the properties of the 'aliases' node and populates * the global lookup table with the properties.
*/ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) { struct property *pp; of_aliases = of_find_node_by_path("/aliases"); of_chosen = of_find_node_by_path("/chosen"); if (of_chosen == NULL) of_chosen = of_find_node_by_path("/chosen@0"); if (of_chosen) { /* linux,stdout-path and /aliases/stdout are for legacy compatibility */ const char *name = NULL; if (of_property_read_string(of_chosen, "stdout-path", &name)) of_property_read_string(of_chosen, "linux,stdout-path", &name); if (IS_ENABLED(CONFIG_PPC) && !name) of_property_read_string(of_aliases, "stdout", &name); if (name) of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); if (of_stdout) of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT; } if (!of_aliases) return; for_each_property_of_node(of_aliases, pp) { const char *start = pp->name; const char *end = start + strlen(start); struct device_node *np; struct alias_prop *ap; int id, len; /* Skip properties we do not want to process */ if (!strcmp(pp->name, "name") || !strcmp(pp->name, "phandle") || !strcmp(pp->name, "linux,phandle")) continue; np = of_find_node_by_path(pp->value); if (!np) continue; /* walk the alias backwards to extract the id and work out * the 'stem' string */ while (isdigit(*(end-1)) && end > start) end--; len = end - start; if (kstrtoint(end, 10, &id) < 0) continue; /* Allocate an alias_prop with enough space for the stem */ ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap)); if (!ap) continue; memset(ap, 0, sizeof(*ap) + len + 1); ap->alias = start; of_alias_add(ap, np, id, start, len); } } /** * of_alias_get_id - Get alias id for the given device_node * @np: Pointer to the given device_node * @stem: Alias stem of the given device_node * * The function traverses the lookup table to get the alias id for the given * device_node and alias stem. * * Return: The alias id if found. */ int of_alias_get_id(struct device_node *np, const char *stem) { struct alias_prop *app; int id = -ENODEV; mutex_lock(&of_mutex); list_for_each_entry(app, &aliases_lookup, link) { if (strcmp(app->stem, stem) != 0) continue; if (np == app->np) { id = app->id; break; } } mutex_unlock(&of_mutex); return id; } EXPORT_SYMBOL_GPL(of_alias_get_id); /** * of_alias_get_highest_id - Get highest alias id for the given stem * @stem: Alias stem to be examined * * The function traverses the lookup table to get the highest alias id for the * given alias stem. It returns the alias id if found. */ int of_alias_get_highest_id(const char *stem) { struct alias_prop *app; int id = -ENODEV; mutex_lock(&of_mutex); list_for_each_entry(app, &aliases_lookup, link) { if (strcmp(app->stem, stem) != 0) continue; if (app->id > id) id = app->id; } mutex_unlock(&of_mutex); return id; } EXPORT_SYMBOL_GPL(of_alias_get_highest_id); /** * of_console_check() - Test and setup console for DT setup * @dn: Pointer to device node * @name: Name to use for preferred console without index. e.g. "ttyS" * @index: Index to use for preferred console. * * Check if the given device node matches the stdout-path property in the * /chosen node. If it does then register it as the preferred console. * * Return: true if the console was successfully set up, false otherwise. */ bool of_console_check(struct device_node *dn, char *name, int index) { if (!dn || dn != of_stdout || console_set_on_cmdline) return false; /* * XXX: cast `options' to char pointer to suppress compilation * warnings: printk, UART and console drivers expect char pointer.
*/ return !add_preferred_console(name, index, (char *)of_stdout_options); } EXPORT_SYMBOL_GPL(of_console_check); /** * of_find_next_cache_node - Find a node's subsidiary cache * @np: node of type "cpu" or "cache" * * Return: A node pointer with refcount incremented, use * of_node_put() on it when done. Caller should hold a reference * to np. */ struct device_node *of_find_next_cache_node(const struct device_node *np) { struct device_node *child, *cache_node; cache_node = of_parse_phandle(np, "l2-cache", 0); if (!cache_node) cache_node = of_parse_phandle(np, "next-level-cache", 0); if (cache_node) return cache_node; /* OF on pmac has nodes instead of properties named "l2-cache" * beneath CPU nodes. */ if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu")) for_each_child_of_node(np, child) if (of_node_is_type(child, "cache")) return child; return NULL; } /** * of_find_last_cache_level - Find the level at which the last cache is * present for the given logical cpu * * @cpu: cpu number(logical index) for which the last cache level is needed * * Return: The level at which the last cache is present. It is exactly * same as the total number of cache levels for the given logical cpu. */ int of_find_last_cache_level(unsigned int cpu) { u32 cache_level = 0; struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu); while (np) { of_node_put(prev); prev = np; np = of_find_next_cache_node(np); } of_property_read_u32(prev, "cache-level", &cache_level); of_node_put(prev); return cache_level; } /** * of_map_id - Translate an ID through a downstream mapping. * @np: root complex device node. * @id: device ID to map. * @map_name: property name of the map to use. * @map_mask_name: optional property name of the mask to use. * @target: optional pointer to a target device node. * @id_out: optional pointer to receive the translated ID. * * Given a device ID, look up the appropriate implementation-defined * platform ID and/or the target device which receives transactions on that * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or * @id_out may be NULL if only the other is required. If @target points to * a non-NULL device node pointer, only entries targeting that node will be * matched; if it points to a NULL value, it will receive the device node of * the first matching target phandle, with a reference held. * * Return: 0 on success or a standard error code on failure. */ int of_map_id(struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out) { u32 map_mask, masked_id; int map_len; const __be32 *map = NULL; if (!np || !map_name || (!target && !id_out)) return -EINVAL; map = of_get_property(np, map_name, &map_len); if (!map) { if (target) return -ENODEV; /* Otherwise, no map implies no translation */ *id_out = id; return 0; } if (!map_len || map_len % (4 * sizeof(*map))) { pr_err("%pOF: Error: Bad %s length: %d\n", np, map_name, map_len); return -EINVAL; } /* The default is to select all bits. */ map_mask = 0xffffffff; /* * Can be overridden by "{iommu,msi}-map-mask" property. * If of_property_read_u32() fails, the default is used. 
*/ if (map_mask_name) of_property_read_u32(np, map_mask_name, &map_mask); masked_id = map_mask & id; for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) { struct device_node *phandle_node; u32 id_base = be32_to_cpup(map + 0); u32 phandle = be32_to_cpup(map + 1); u32 out_base = be32_to_cpup(map + 2); u32 id_len = be32_to_cpup(map + 3); if (id_base & ~map_mask) { pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n", np, map_name, map_name, map_mask, id_base); return -EFAULT; } if (masked_id < id_base || masked_id >= id_base + id_len) continue; phandle_node = of_find_node_by_phandle(phandle); if (!phandle_node) return -ENODEV; if (target) { if (*target) of_node_put(phandle_node); else *target = phandle_node; if (*target != phandle_node) continue; } if (id_out) *id_out = masked_id - id_base + out_base; pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n", np, map_name, map_mask, id_base, out_base, id_len, id, masked_id - id_base + out_base); return 0; } pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name, id, target && *target ? *target : NULL); /* Bypasses translation */ if (id_out) *id_out = id; return 0; } EXPORT_SYMBOL_GPL(of_map_id);
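The lookup, availability and phandle-parsing helpers above are typically combined in driver probe paths. The sketch below is illustrative only and is not part of the original source: the compatible string "acme,widget" and the "clocks"/"#clock-cells" property pair are hypothetical placeholders, chosen simply to show the reference-counting pattern in which every node returned with an elevated refcount is released with of_node_put().

#include <linux/of.h>
#include <linux/errno.h>

/* Hypothetical example: find an enabled "acme,widget" node and parse the
 * first entry of its "clocks" phandle list. */
static int acme_widget_lookup(void)
{
	struct device_node *np;
	struct of_phandle_args args;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "acme,widget");
	if (!np)
		return -ENODEV;

	/* Skip nodes whose status property is not "okay"/"ok" */
	if (!of_device_is_available(np)) {
		of_node_put(np);
		return -ENODEV;
	}

	/* "#clock-cells" in the provider node determines the argument count */
	ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0, &args);
	if (!ret)
		of_node_put(args.np);	/* drop the provider reference */

	of_node_put(np);
	return ret;
}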
// SPDX-License-Identifier: GPL-2.0-or-later /* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/nospec.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/pcm.h> #include <sound/timer.h> #include <sound/control.h> #include <sound/info.h> #include "pcm_local.h" MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Abramo Bagnara <abramo@alsa-project.org>"); MODULE_DESCRIPTION("Midlevel PCM code for ALSA."); MODULE_LICENSE("GPL"); static LIST_HEAD(snd_pcm_devices); static DEFINE_MUTEX(register_mutex); #if IS_ENABLED(CONFIG_SND_PCM_OSS) static LIST_HEAD(snd_pcm_notify_list); #endif static int snd_pcm_free(struct snd_pcm *pcm); static int snd_pcm_dev_free(struct snd_device *device); static int snd_pcm_dev_register(struct snd_device *device); static int snd_pcm_dev_disconnect(struct snd_device *device); static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device) { struct snd_pcm *pcm; list_for_each_entry(pcm, &snd_pcm_devices, list) { if (pcm->card == card && pcm->device == device) return pcm; } return NULL; } static int snd_pcm_next(struct snd_card *card, int device) { struct snd_pcm *pcm; list_for_each_entry(pcm, &snd_pcm_devices, list) { if (pcm->card == card && pcm->device > device) return pcm->device; else if (pcm->card->number > card->number) return -1; } return -1; } static int snd_pcm_add(struct snd_pcm *newpcm) { struct snd_pcm *pcm; if (newpcm->internal) return 0; list_for_each_entry(pcm, &snd_pcm_devices, list) { if (pcm->card == newpcm->card && pcm->device == newpcm->device) return -EBUSY; if (pcm->card->number > newpcm->card->number || (pcm->card == newpcm->card && pcm->device > newpcm->device)) {
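/* Keep snd_pcm_devices sorted by card number, then device number:
 * link the new PCM in front of the first entry that sorts after it
 * (the list_add_tail() after the loop handles the append case). */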
list_add(&newpcm->list, pcm->list.prev); return 0; } } list_add_tail(&newpcm->list, &snd_pcm_devices); return 0; } static int snd_pcm_control_ioctl(struct snd_card *card, struct snd_ctl_file *control, unsigned int cmd, unsigned long arg) { switch (cmd) { case SNDRV_CTL_IOCTL_PCM_NEXT_DEVICE: { int device; if (get_user(device, (int __user *)arg)) return -EFAULT; scoped_guard(mutex, &register_mutex) device = snd_pcm_next(card, device); if (put_user(device, (int __user *)arg)) return -EFAULT; return 0; } case SNDRV_CTL_IOCTL_PCM_INFO: { struct snd_pcm_info __user *info; unsigned int device, subdevice; int stream; struct snd_pcm *pcm; struct snd_pcm_str *pstr; struct snd_pcm_substream *substream; info = (struct snd_pcm_info __user *)arg; if (get_user(device, &info->device)) return -EFAULT; if (get_user(stream, &info->stream)) return -EFAULT; if (stream < 0 || stream > 1) return -EINVAL; stream = array_index_nospec(stream, 2); if (get_user(subdevice, &info->subdevice)) return -EFAULT; guard(mutex)(&register_mutex); pcm = snd_pcm_get(card, device); if (pcm == NULL) return -ENXIO; pstr = &pcm->streams[stream]; if (pstr->substream_count == 0) return -ENOENT; if (subdevice >= pstr->substream_count) return -ENXIO; for (substream = pstr->substream; substream; substream = substream->next) if (substream->number == (int)subdevice) break; if (substream == NULL) return -ENXIO; guard(mutex)(&pcm->open_mutex); return snd_pcm_info_user(substream, info); } case SNDRV_CTL_IOCTL_PCM_PREFER_SUBDEVICE: { int val; if (get_user(val, (int __user *)arg)) return -EFAULT; control->preferred_subdevice[SND_CTL_SUBDEV_PCM] = val; return 0; } } return -ENOIOCTLCMD; } #define FORMAT(v) [SNDRV_PCM_FORMAT_##v] = #v static const char * const snd_pcm_format_names[] = { FORMAT(S8), FORMAT(U8), FORMAT(S16_LE), FORMAT(S16_BE), FORMAT(U16_LE), FORMAT(U16_BE), FORMAT(S24_LE), FORMAT(S24_BE), FORMAT(U24_LE), FORMAT(U24_BE), FORMAT(S32_LE), FORMAT(S32_BE), FORMAT(U32_LE), FORMAT(U32_BE), FORMAT(FLOAT_LE), FORMAT(FLOAT_BE), FORMAT(FLOAT64_LE), FORMAT(FLOAT64_BE), FORMAT(IEC958_SUBFRAME_LE), FORMAT(IEC958_SUBFRAME_BE), FORMAT(MU_LAW), FORMAT(A_LAW), FORMAT(IMA_ADPCM), FORMAT(MPEG), FORMAT(GSM), FORMAT(SPECIAL), FORMAT(S24_3LE), FORMAT(S24_3BE), FORMAT(U24_3LE), FORMAT(U24_3BE), FORMAT(S20_3LE), FORMAT(S20_3BE), FORMAT(U20_3LE), FORMAT(U20_3BE), FORMAT(S18_3LE), FORMAT(S18_3BE), FORMAT(U18_3LE), FORMAT(U18_3BE), FORMAT(G723_24), FORMAT(G723_24_1B), FORMAT(G723_40), FORMAT(G723_40_1B), FORMAT(DSD_U8), FORMAT(DSD_U16_LE), FORMAT(DSD_U32_LE), FORMAT(DSD_U16_BE), FORMAT(DSD_U32_BE), FORMAT(S20_LE), FORMAT(S20_BE), FORMAT(U20_LE), FORMAT(U20_BE), }; /** * snd_pcm_format_name - Return a name string for the given PCM format * @format: PCM format * * Return: the format name string */ const char *snd_pcm_format_name(snd_pcm_format_t format) { unsigned int format_num = (__force unsigned int)format; if (format_num >= ARRAY_SIZE(snd_pcm_format_names) || !snd_pcm_format_names[format_num]) return "Unknown"; return snd_pcm_format_names[format_num]; } EXPORT_SYMBOL_GPL(snd_pcm_format_name); #ifdef CONFIG_SND_VERBOSE_PROCFS #define STATE(v) [SNDRV_PCM_STATE_##v] = #v #define STREAM(v) [SNDRV_PCM_STREAM_##v] = #v #define READY(v) [SNDRV_PCM_READY_##v] = #v #define XRUN(v) [SNDRV_PCM_XRUN_##v] = #v #define SILENCE(v) [SNDRV_PCM_SILENCE_##v] = #v #define TSTAMP(v) [SNDRV_PCM_TSTAMP_##v] = #v #define ACCESS(v) [SNDRV_PCM_ACCESS_##v] = #v #define START(v) [SNDRV_PCM_START_##v] = #v #define SUBFORMAT(v) [SNDRV_PCM_SUBFORMAT_##v] = #v static const char * const 
snd_pcm_stream_names[] = { STREAM(PLAYBACK), STREAM(CAPTURE), }; static const char * const snd_pcm_state_names[] = { STATE(OPEN), STATE(SETUP), STATE(PREPARED), STATE(RUNNING), STATE(XRUN), STATE(DRAINING), STATE(PAUSED), STATE(SUSPENDED), STATE(DISCONNECTED), }; static const char * const snd_pcm_access_names[] = { ACCESS(MMAP_INTERLEAVED), ACCESS(MMAP_NONINTERLEAVED), ACCESS(MMAP_COMPLEX), ACCESS(RW_INTERLEAVED), ACCESS(RW_NONINTERLEAVED), }; static const char * const snd_pcm_subformat_names[] = { SUBFORMAT(STD), SUBFORMAT(MSBITS_MAX), SUBFORMAT(MSBITS_20), SUBFORMAT(MSBITS_24), }; static const char * const snd_pcm_tstamp_mode_names[] = { TSTAMP(NONE), TSTAMP(ENABLE), }; static const char *snd_pcm_stream_name(int stream) { return snd_pcm_stream_names[stream]; } static const char *snd_pcm_access_name(snd_pcm_access_t access) { return snd_pcm_access_names[(__force int)access]; } static const char *snd_pcm_subformat_name(snd_pcm_subformat_t subformat) { return snd_pcm_subformat_names[(__force int)subformat]; } static const char *snd_pcm_tstamp_mode_name(int mode) { return snd_pcm_tstamp_mode_names[mode]; } static const char *snd_pcm_state_name(snd_pcm_state_t state) { return snd_pcm_state_names[(__force int)state]; } #if IS_ENABLED(CONFIG_SND_PCM_OSS) #include <linux/soundcard.h> static const char *snd_pcm_oss_format_name(int format) { switch (format) { case AFMT_MU_LAW: return "MU_LAW"; case AFMT_A_LAW: return "A_LAW"; case AFMT_IMA_ADPCM: return "IMA_ADPCM"; case AFMT_U8: return "U8"; case AFMT_S16_LE: return "S16_LE"; case AFMT_S16_BE: return "S16_BE"; case AFMT_S8: return "S8"; case AFMT_U16_LE: return "U16_LE"; case AFMT_U16_BE: return "U16_BE"; case AFMT_MPEG: return "MPEG"; default: return "unknown"; } } #endif static void snd_pcm_proc_info_read(struct snd_pcm_substream *substream, struct snd_info_buffer *buffer) { struct snd_pcm_info *info __free(kfree) = NULL; int err; if (! 
substream) return; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return; err = snd_pcm_info(substream, info); if (err < 0) { snd_iprintf(buffer, "error %d\n", err); return; } snd_iprintf(buffer, "card: %d\n", info->card); snd_iprintf(buffer, "device: %d\n", info->device); snd_iprintf(buffer, "subdevice: %d\n", info->subdevice); snd_iprintf(buffer, "stream: %s\n", snd_pcm_stream_name(info->stream)); snd_iprintf(buffer, "id: %s\n", info->id); snd_iprintf(buffer, "name: %s\n", info->name); snd_iprintf(buffer, "subname: %s\n", info->subname); snd_iprintf(buffer, "class: %d\n", info->dev_class); snd_iprintf(buffer, "subclass: %d\n", info->dev_subclass); snd_iprintf(buffer, "subdevices_count: %d\n", info->subdevices_count); snd_iprintf(buffer, "subdevices_avail: %d\n", info->subdevices_avail); } static void snd_pcm_stream_proc_info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_pcm_proc_info_read(((struct snd_pcm_str *)entry->private_data)->substream, buffer); } static void snd_pcm_substream_proc_info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_pcm_proc_info_read(entry->private_data, buffer); } static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_substream *substream = entry->private_data; struct snd_pcm_runtime *runtime; guard(mutex)(&substream->pcm->open_mutex); runtime = substream->runtime; if (!runtime) { snd_iprintf(buffer, "closed\n"); return; } if (runtime->state == SNDRV_PCM_STATE_OPEN) { snd_iprintf(buffer, "no setup\n"); return; } snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access)); snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format)); snd_iprintf(buffer, "subformat: %s\n", snd_pcm_subformat_name(runtime->subformat)); snd_iprintf(buffer, "channels: %u\n", runtime->channels); snd_iprintf(buffer, "rate: %u (%u/%u)\n", runtime->rate, runtime->rate_num, runtime->rate_den); snd_iprintf(buffer, "period_size: %lu\n", runtime->period_size); snd_iprintf(buffer, "buffer_size: %lu\n", runtime->buffer_size); #if IS_ENABLED(CONFIG_SND_PCM_OSS) if (substream->oss.oss) { snd_iprintf(buffer, "OSS format: %s\n", snd_pcm_oss_format_name(runtime->oss.format)); snd_iprintf(buffer, "OSS channels: %u\n", runtime->oss.channels); snd_iprintf(buffer, "OSS rate: %u\n", runtime->oss.rate); snd_iprintf(buffer, "OSS period bytes: %lu\n", (unsigned long)runtime->oss.period_bytes); snd_iprintf(buffer, "OSS periods: %u\n", runtime->oss.periods); snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames); } #endif } static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_substream *substream = entry->private_data; struct snd_pcm_runtime *runtime; guard(mutex)(&substream->pcm->open_mutex); runtime = substream->runtime; if (!runtime) { snd_iprintf(buffer, "closed\n"); return; } if (runtime->state == SNDRV_PCM_STATE_OPEN) { snd_iprintf(buffer, "no setup\n"); return; } snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode)); snd_iprintf(buffer, "period_step: %u\n", runtime->period_step); snd_iprintf(buffer, "avail_min: %lu\n", runtime->control->avail_min); snd_iprintf(buffer, "start_threshold: %lu\n", runtime->start_threshold); snd_iprintf(buffer, "stop_threshold: %lu\n", runtime->stop_threshold); snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold); snd_iprintf(buffer, "silence_size: 
%lu\n", runtime->silence_size); snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary); } static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_substream *substream = entry->private_data; struct snd_pcm_runtime *runtime; struct snd_pcm_status64 status; int err; guard(mutex)(&substream->pcm->open_mutex); runtime = substream->runtime; if (!runtime) { snd_iprintf(buffer, "closed\n"); return; } memset(&status, 0, sizeof(status)); err = snd_pcm_status64(substream, &status); if (err < 0) { snd_iprintf(buffer, "error %d\n", err); return; } snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state)); snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid)); snd_iprintf(buffer, "trigger_time: %lld.%09lld\n", status.trigger_tstamp_sec, status.trigger_tstamp_nsec); snd_iprintf(buffer, "tstamp : %lld.%09lld\n", status.tstamp_sec, status.tstamp_nsec); snd_iprintf(buffer, "delay : %ld\n", status.delay); snd_iprintf(buffer, "avail : %ld\n", status.avail); snd_iprintf(buffer, "avail_max : %ld\n", status.avail_max); snd_iprintf(buffer, "-----\n"); snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr); snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr); } #ifdef CONFIG_SND_PCM_XRUN_DEBUG static void snd_pcm_xrun_injection_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_substream *substream = entry->private_data; snd_pcm_stop_xrun(substream); } static void snd_pcm_xrun_debug_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_str *pstr = entry->private_data; snd_iprintf(buffer, "%d\n", pstr->xrun_debug); } static void snd_pcm_xrun_debug_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_str *pstr = entry->private_data; char line[64]; if (!snd_info_get_line(buffer, line, sizeof(line))) pstr->xrun_debug = simple_strtoul(line, NULL, 10); } #endif static int snd_pcm_stream_proc_init(struct snd_pcm_str *pstr) { struct snd_pcm *pcm = pstr->pcm; struct snd_info_entry *entry; char name[16]; sprintf(name, "pcm%i%c", pcm->device, pstr->stream == SNDRV_PCM_STREAM_PLAYBACK ? 
'p' : 'c'); entry = snd_info_create_card_entry(pcm->card, name, pcm->card->proc_root); if (!entry) return -ENOMEM; entry->mode = S_IFDIR | 0555; pstr->proc_root = entry; entry = snd_info_create_card_entry(pcm->card, "info", pstr->proc_root); if (entry) snd_info_set_text_ops(entry, pstr, snd_pcm_stream_proc_info_read); #ifdef CONFIG_SND_PCM_XRUN_DEBUG entry = snd_info_create_card_entry(pcm->card, "xrun_debug", pstr->proc_root); if (entry) { snd_info_set_text_ops(entry, pstr, snd_pcm_xrun_debug_read); entry->c.text.write = snd_pcm_xrun_debug_write; entry->mode |= 0200; } #endif return 0; } static int snd_pcm_stream_proc_done(struct snd_pcm_str *pstr) { snd_info_free_entry(pstr->proc_root); pstr->proc_root = NULL; return 0; } static struct snd_info_entry * create_substream_info_entry(struct snd_pcm_substream *substream, const char *name, void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) { struct snd_info_entry *entry; entry = snd_info_create_card_entry(substream->pcm->card, name, substream->proc_root); if (entry) snd_info_set_text_ops(entry, substream, read); return entry; } static int snd_pcm_substream_proc_init(struct snd_pcm_substream *substream) { struct snd_info_entry *entry; struct snd_card *card; char name[16]; card = substream->pcm->card; sprintf(name, "sub%i", substream->number); entry = snd_info_create_card_entry(card, name, substream->pstr->proc_root); if (!entry) return -ENOMEM; entry->mode = S_IFDIR | 0555; substream->proc_root = entry; create_substream_info_entry(substream, "info", snd_pcm_substream_proc_info_read); create_substream_info_entry(substream, "hw_params", snd_pcm_substream_proc_hw_params_read); create_substream_info_entry(substream, "sw_params", snd_pcm_substream_proc_sw_params_read); create_substream_info_entry(substream, "status", snd_pcm_substream_proc_status_read); #ifdef CONFIG_SND_PCM_XRUN_DEBUG entry = create_substream_info_entry(substream, "xrun_injection", NULL); if (entry) { entry->c.text.write = snd_pcm_xrun_injection_write; entry->mode = S_IFREG | 0200; } #endif /* CONFIG_SND_PCM_XRUN_DEBUG */ return 0; } #else /* !CONFIG_SND_VERBOSE_PROCFS */ static inline int snd_pcm_stream_proc_init(struct snd_pcm_str *pstr) { return 0; } static inline int snd_pcm_stream_proc_done(struct snd_pcm_str *pstr) { return 0; } static inline int snd_pcm_substream_proc_init(struct snd_pcm_substream *substream) { return 0; } #endif /* CONFIG_SND_VERBOSE_PROCFS */ static const struct attribute_group *pcm_dev_attr_groups[]; /* * PM callbacks: we need to deal only with suspend here, as the resume is * triggered either from user-space or the driver's resume callback */ #ifdef CONFIG_PM_SLEEP static int do_pcm_suspend(struct device *dev) { struct snd_pcm_str *pstr = dev_get_drvdata(dev); if (!pstr->pcm->no_device_suspend) snd_pcm_suspend_all(pstr->pcm); return 0; } #endif static const struct dev_pm_ops pcm_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(do_pcm_suspend, NULL) }; /* device type for PCM -- basically only for passing PM callbacks */ static const struct device_type pcm_dev_type = { .name = "pcm", .pm = &pcm_dev_pm_ops, }; /** * snd_pcm_new_stream - create a new PCM stream * @pcm: the pcm instance * @stream: the stream direction, SNDRV_PCM_STREAM_XXX * @substream_count: the number of substreams * * Creates a new stream for the pcm. * The corresponding stream on the pcm must have been empty before * calling this, i.e. zero must be given to the argument of * snd_pcm_new(). * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count) { int idx, err; struct snd_pcm_str *pstr = &pcm->streams[stream]; struct snd_pcm_substream *substream, *prev; #if IS_ENABLED(CONFIG_SND_PCM_OSS) mutex_init(&pstr->oss.setup_mutex); #endif pstr->stream = stream; pstr->pcm = pcm; pstr->substream_count = substream_count; if (!substream_count) return 0; err = snd_device_alloc(&pstr->dev, pcm->card); if (err < 0) return err; dev_set_name(pstr->dev, "pcmC%iD%i%c", pcm->card->number, pcm->device, stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c'); pstr->dev->groups = pcm_dev_attr_groups; pstr->dev->type = &pcm_dev_type; dev_set_drvdata(pstr->dev, pstr); if (!pcm->internal) { err = snd_pcm_stream_proc_init(pstr); if (err < 0) { pcm_err(pcm, "Error in snd_pcm_stream_proc_init\n"); return err; } } prev = NULL; for (idx = 0, prev = NULL; idx < substream_count; idx++) { substream = kzalloc(sizeof(*substream), GFP_KERNEL); if (!substream) return -ENOMEM; substream->pcm = pcm; substream->pstr = pstr; substream->number = idx; substream->stream = stream; sprintf(substream->name, "subdevice #%i", idx); substream->buffer_bytes_max = UINT_MAX; if (prev == NULL) pstr->substream = substream; else prev->next = substream; if (!pcm->internal) { err = snd_pcm_substream_proc_init(substream); if (err < 0) { pcm_err(pcm, "Error in snd_pcm_stream_proc_init\n"); if (prev == NULL) pstr->substream = NULL; else prev->next = NULL; kfree(substream); return err; } } substream->group = &substream->self_group; snd_pcm_group_init(&substream->self_group); list_add_tail(&substream->link_list, &substream->self_group.substreams); atomic_set(&substream->mmap_count, 0); prev = substream; } return 0; } EXPORT_SYMBOL(snd_pcm_new_stream); static int _snd_pcm_new(struct snd_card *card, const char *id, int device, int playback_count, int capture_count, bool internal, struct snd_pcm **rpcm) { struct snd_pcm *pcm; int err; static const struct snd_device_ops ops = { .dev_free = snd_pcm_dev_free, .dev_register = snd_pcm_dev_register, .dev_disconnect = snd_pcm_dev_disconnect, }; static const struct snd_device_ops internal_ops = { .dev_free = snd_pcm_dev_free, }; if (snd_BUG_ON(!card)) return -ENXIO; if (rpcm) *rpcm = NULL; pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); if (!pcm) return -ENOMEM; pcm->card = card; pcm->device = device; pcm->internal = internal; mutex_init(&pcm->open_mutex); init_waitqueue_head(&pcm->open_wait); INIT_LIST_HEAD(&pcm->list); if (id) strscpy(pcm->id, id, sizeof(pcm->id)); err = snd_pcm_new_stream(pcm, SNDRV_PCM_STREAM_PLAYBACK, playback_count); if (err < 0) goto free_pcm; err = snd_pcm_new_stream(pcm, SNDRV_PCM_STREAM_CAPTURE, capture_count); if (err < 0) goto free_pcm; err = snd_device_new(card, SNDRV_DEV_PCM, pcm, internal ? &internal_ops : &ops); if (err < 0) goto free_pcm; if (rpcm) *rpcm = pcm; return 0; free_pcm: snd_pcm_free(pcm); return err; } /** * snd_pcm_new - create a new PCM instance * @card: the card instance * @id: the id string * @device: the device index (zero based) * @playback_count: the number of substreams for playback * @capture_count: the number of substreams for capture * @rpcm: the pointer to store the new pcm instance * * Creates a new PCM instance. * * The pcm operators have to be set afterwards to the new instance * via snd_pcm_set_ops(). * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_pcm_new(struct snd_card *card, const char *id, int device, int playback_count, int capture_count, struct snd_pcm **rpcm) { return _snd_pcm_new(card, id, device, playback_count, capture_count, false, rpcm); } EXPORT_SYMBOL(snd_pcm_new); /** * snd_pcm_new_internal - create a new internal PCM instance * @card: the card instance * @id: the id string * @device: the device index (zero based - shared with normal PCMs) * @playback_count: the number of substreams for playback * @capture_count: the number of substreams for capture * @rpcm: the pointer to store the new pcm instance * * Creates a new internal PCM instance with no userspace device or procfs * entries. This is used by ASoC Back End PCMs in order to create a PCM that * will only be used internally by kernel drivers. i.e. it cannot be opened * by userspace. It provides existing ASoC components drivers with a substream * and access to any private data. * * The pcm operators have to be set afterwards to the new instance * via snd_pcm_set_ops(). * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device, int playback_count, int capture_count, struct snd_pcm **rpcm) { return _snd_pcm_new(card, id, device, playback_count, capture_count, true, rpcm); } EXPORT_SYMBOL(snd_pcm_new_internal); static void free_chmap(struct snd_pcm_str *pstr) { if (pstr->chmap_kctl) { struct snd_card *card = pstr->pcm->card; snd_ctl_remove(card, pstr->chmap_kctl); pstr->chmap_kctl = NULL; } } static void snd_pcm_free_stream(struct snd_pcm_str * pstr) { struct snd_pcm_substream *substream, *substream_next; #if IS_ENABLED(CONFIG_SND_PCM_OSS) struct snd_pcm_oss_setup *setup, *setupn; #endif /* free all proc files under the stream */ snd_pcm_stream_proc_done(pstr); substream = pstr->substream; while (substream) { substream_next = substream->next; snd_pcm_timer_done(substream); kfree(substream); substream = substream_next; } #if IS_ENABLED(CONFIG_SND_PCM_OSS) for (setup = pstr->oss.setup_list; setup; setup = setupn) { setupn = setup->next; kfree(setup->task_name); kfree(setup); } #endif free_chmap(pstr); if (pstr->substream_count) put_device(pstr->dev); } #if IS_ENABLED(CONFIG_SND_PCM_OSS) #define pcm_call_notify(pcm, call) \ do { \ struct snd_pcm_notify *_notify; \ list_for_each_entry(_notify, &snd_pcm_notify_list, list) \ _notify->call(pcm); \ } while (0) #else #define pcm_call_notify(pcm, call) do {} while (0) #endif static int snd_pcm_free(struct snd_pcm *pcm) { if (!pcm) return 0; if (!pcm->internal) pcm_call_notify(pcm, n_unregister); if (pcm->private_free) pcm->private_free(pcm); snd_pcm_lib_preallocate_free_for_all(pcm); snd_pcm_free_stream(&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK]); snd_pcm_free_stream(&pcm->streams[SNDRV_PCM_STREAM_CAPTURE]); kfree(pcm); return 0; } static int snd_pcm_dev_free(struct snd_device *device) { struct snd_pcm *pcm = device->device_data; return snd_pcm_free(pcm); } int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, struct file *file, struct snd_pcm_substream **rsubstream) { struct snd_pcm_str * pstr; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; struct snd_card *card; int prefer_subdevice; size_t size; if (snd_BUG_ON(!pcm || !rsubstream)) return -ENXIO; if (snd_BUG_ON(stream != SNDRV_PCM_STREAM_PLAYBACK && stream != SNDRV_PCM_STREAM_CAPTURE)) return -EINVAL; *rsubstream = NULL; pstr = &pcm->streams[stream]; if (pstr->substream == NULL || pstr->substream_count == 0) return -ENODEV; card = pcm->card; 
prefer_subdevice = snd_ctl_get_preferred_subdevice(card, SND_CTL_SUBDEV_PCM); if (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX) { int opposite = !stream; for (substream = pcm->streams[opposite].substream; substream; substream = substream->next) { if (SUBSTREAM_BUSY(substream)) return -EAGAIN; } } if (file->f_flags & O_APPEND) { if (prefer_subdevice < 0) { if (pstr->substream_count > 1) return -EINVAL; /* must be unique */ substream = pstr->substream; } else { for (substream = pstr->substream; substream; substream = substream->next) if (substream->number == prefer_subdevice) break; } if (! substream) return -ENODEV; if (! SUBSTREAM_BUSY(substream)) return -EBADFD; substream->ref_count++; *rsubstream = substream; return 0; } for (substream = pstr->substream; substream; substream = substream->next) { if (!SUBSTREAM_BUSY(substream) && (prefer_subdevice == -1 || substream->number == prefer_subdevice)) break; } if (substream == NULL) return -EAGAIN; runtime = kzalloc(sizeof(*runtime), GFP_KERNEL); if (runtime == NULL) return -ENOMEM; size = PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)); runtime->status = alloc_pages_exact(size, GFP_KERNEL); if (runtime->status == NULL) { kfree(runtime); return -ENOMEM; } memset(runtime->status, 0, size); size = PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)); runtime->control = alloc_pages_exact(size, GFP_KERNEL); if (runtime->control == NULL) { free_pages_exact(runtime->status, PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))); kfree(runtime); return -ENOMEM; } memset(runtime->control, 0, size); init_waitqueue_head(&runtime->sleep); init_waitqueue_head(&runtime->tsleep); __snd_pcm_set_state(runtime, SNDRV_PCM_STATE_OPEN); mutex_init(&runtime->buffer_mutex); atomic_set(&runtime->buffer_accessing, 0); substream->runtime = runtime; substream->private_data = pcm->private_data; substream->ref_count = 1; substream->f_flags = file->f_flags; substream->pid = get_pid(task_pid(current)); pstr->substream_opened++; *rsubstream = substream; return 0; } void snd_pcm_detach_substream(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) return; runtime = substream->runtime; if (runtime->private_free != NULL) runtime->private_free(runtime); free_pages_exact(runtime->status, PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))); free_pages_exact(runtime->control, PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))); kfree(runtime->hw_constraints.rules); /* Avoid concurrent access to runtime via PCM timer interface */ if (substream->timer) { scoped_guard(spinlock_irq, &substream->timer->lock) substream->runtime = NULL; } else { substream->runtime = NULL; } mutex_destroy(&runtime->buffer_mutex); snd_fasync_free(runtime->fasync); kfree(runtime); put_pid(substream->pid); substream->pid = NULL; substream->pstr->substream_opened--; } static ssize_t pcm_class_show(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_pcm_str *pstr = dev_get_drvdata(dev); struct snd_pcm *pcm = pstr->pcm; const char *str; static const char *strs[SNDRV_PCM_CLASS_LAST + 1] = { [SNDRV_PCM_CLASS_GENERIC] = "generic", [SNDRV_PCM_CLASS_MULTI] = "multi", [SNDRV_PCM_CLASS_MODEM] = "modem", [SNDRV_PCM_CLASS_DIGITIZER] = "digitizer", }; if (pcm->dev_class > SNDRV_PCM_CLASS_LAST) str = "none"; else str = strs[pcm->dev_class]; return sysfs_emit(buf, "%s\n", str); } static DEVICE_ATTR_RO(pcm_class); static struct attribute *pcm_dev_attrs[] = { &dev_attr_pcm_class.attr, NULL }; static const struct attribute_group pcm_dev_attr_group = { .attrs = 
pcm_dev_attrs, }; static const struct attribute_group *pcm_dev_attr_groups[] = { &pcm_dev_attr_group, NULL }; static int snd_pcm_dev_register(struct snd_device *device) { int cidx, err; struct snd_pcm_substream *substream; struct snd_pcm *pcm; if (snd_BUG_ON(!device || !device->device_data)) return -ENXIO; pcm = device->device_data; guard(mutex)(&register_mutex); err = snd_pcm_add(pcm); if (err) return err; for (cidx = 0; cidx < 2; cidx++) { int devtype = -1; if (pcm->streams[cidx].substream == NULL) continue; switch (cidx) { case SNDRV_PCM_STREAM_PLAYBACK: devtype = SNDRV_DEVICE_TYPE_PCM_PLAYBACK; break; case SNDRV_PCM_STREAM_CAPTURE: devtype = SNDRV_DEVICE_TYPE_PCM_CAPTURE; break; } /* register pcm */ err = snd_register_device(devtype, pcm->card, pcm->device, &snd_pcm_f_ops[cidx], pcm, pcm->streams[cidx].dev); if (err < 0) { list_del_init(&pcm->list); return err; } for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) snd_pcm_timer_init(substream); } pcm_call_notify(pcm, n_register); return err; } static int snd_pcm_dev_disconnect(struct snd_device *device) { struct snd_pcm *pcm = device->device_data; struct snd_pcm_substream *substream; int cidx; guard(mutex)(&register_mutex); guard(mutex)(&pcm->open_mutex); wake_up(&pcm->open_wait); list_del_init(&pcm->list); for_each_pcm_substream(pcm, cidx, substream) { snd_pcm_stream_lock_irq(substream); if (substream->runtime) { if (snd_pcm_running(substream)) snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED); /* to be sure, set the state unconditionally */ __snd_pcm_set_state(substream->runtime, SNDRV_PCM_STATE_DISCONNECTED); wake_up(&substream->runtime->sleep); wake_up(&substream->runtime->tsleep); } snd_pcm_stream_unlock_irq(substream); } for_each_pcm_substream(pcm, cidx, substream) snd_pcm_sync_stop(substream, false); pcm_call_notify(pcm, n_disconnect); for (cidx = 0; cidx < 2; cidx++) { if (pcm->streams[cidx].dev) snd_unregister_device(pcm->streams[cidx].dev); free_chmap(&pcm->streams[cidx]); } return 0; } #if IS_ENABLED(CONFIG_SND_PCM_OSS) /** * snd_pcm_notify - Add/remove the notify list * @notify: PCM notify list * @nfree: 0 = register, 1 = unregister * * This adds the given notifier to the global list so that the callback is * called for each registered PCM devices. This exists only for PCM OSS * emulation, so far. 
* * Return: zero if successful, or a negative error code */ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree) { struct snd_pcm *pcm; if (snd_BUG_ON(!notify || !notify->n_register || !notify->n_unregister || !notify->n_disconnect)) return -EINVAL; guard(mutex)(&register_mutex); if (nfree) { list_del(&notify->list); list_for_each_entry(pcm, &snd_pcm_devices, list) notify->n_unregister(pcm); } else { list_add_tail(&notify->list, &snd_pcm_notify_list); list_for_each_entry(pcm, &snd_pcm_devices, list) notify->n_register(pcm); } return 0; } EXPORT_SYMBOL(snd_pcm_notify); #endif /* CONFIG_SND_PCM_OSS */ #ifdef CONFIG_SND_PROC_FS /* * Info interface */ static void snd_pcm_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm *pcm; guard(mutex)(&register_mutex); list_for_each_entry(pcm, &snd_pcm_devices, list) { snd_iprintf(buffer, "%02i-%02i: %s : %s", pcm->card->number, pcm->device, pcm->id, pcm->name); if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) snd_iprintf(buffer, " : playback %i", pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream_count); if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) snd_iprintf(buffer, " : capture %i", pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream_count); snd_iprintf(buffer, "\n"); } } static struct snd_info_entry *snd_pcm_proc_entry; static void snd_pcm_proc_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "pcm", NULL); if (entry) { snd_info_set_text_ops(entry, NULL, snd_pcm_proc_read); if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } snd_pcm_proc_entry = entry; } static void snd_pcm_proc_done(void) { snd_info_free_entry(snd_pcm_proc_entry); } #else /* !CONFIG_SND_PROC_FS */ #define snd_pcm_proc_init() #define snd_pcm_proc_done() #endif /* CONFIG_SND_PROC_FS */ /* * ENTRY functions */ static int __init alsa_pcm_init(void) { snd_ctl_register_ioctl(snd_pcm_control_ioctl); snd_ctl_register_ioctl_compat(snd_pcm_control_ioctl); snd_pcm_proc_init(); return 0; } static void __exit alsa_pcm_exit(void) { snd_ctl_unregister_ioctl(snd_pcm_control_ioctl); snd_ctl_unregister_ioctl_compat(snd_pcm_control_ioctl); snd_pcm_proc_done(); } module_init(alsa_pcm_init) module_exit(alsa_pcm_exit)
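Illustration only: the snd_pcm_new() kernel-doc above notes that the PCM operators have to be attached afterwards via snd_pcm_set_ops(). The following sketch shows one plausible calling sequence from a card driver; struct my_chip, my_driver_new_pcm(), my_playback_ops and my_capture_ops are hypothetical driver-side names and are not part of the core code listed here.

static int my_driver_new_pcm(struct my_chip *chip)
{
	struct snd_pcm *pcm;
	int err;

	/* device index 0, one playback and one capture substream */
	err = snd_pcm_new(chip->card, "My Chip", 0, 1, 1, &pcm);
	if (err < 0)
		return err;

	/* handed back to the driver via substream->private_data at open time */
	pcm->private_data = chip;
	strcpy(pcm->name, "My Chip PCM");

	/* attach the operators before the card is registered */
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_capture_ops);
	return 0;
}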
// SPDX-License-Identifier: GPL-2.0-only /* * Access kernel or user memory without faulting. */ #include <linux/export.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <asm/tlb.h> bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { return true; } #define copy_from_kernel_nofault_loop(dst, src, len, type, err_label) \ while (len >= sizeof(type)) { \ __get_kernel_nofault(dst, src, type, err_label); \ dst += sizeof(type); \ src += sizeof(type); \ len -= sizeof(type); \ } long copy_from_kernel_nofault(void *dst, const void *src, size_t size) { unsigned long align = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) align = (unsigned long)dst | (unsigned long)src; if (!copy_from_kernel_nofault_allowed(src, size)) return -ERANGE; pagefault_disable(); if (!(align & 7)) copy_from_kernel_nofault_loop(dst, src, size, u64, Efault); if (!(align & 3)) copy_from_kernel_nofault_loop(dst, src, size, u32, Efault); if (!(align & 1)) copy_from_kernel_nofault_loop(dst, src, size, u16, Efault); copy_from_kernel_nofault_loop(dst, src, size, u8, Efault); pagefault_enable(); return 0; Efault: pagefault_enable(); return -EFAULT; } EXPORT_SYMBOL_GPL(copy_from_kernel_nofault); #define copy_to_kernel_nofault_loop(dst, src, len, type, err_label) \ while (len >= sizeof(type)) { \ __put_kernel_nofault(dst, src, type, err_label); \ dst += sizeof(type); \ src += sizeof(type); \ len -= sizeof(type); \ } long copy_to_kernel_nofault(void *dst, const void *src, size_t size) { unsigned long align = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) align = (unsigned long)dst | (unsigned long)src; pagefault_disable(); if (!(align & 7)) copy_to_kernel_nofault_loop(dst, src, size, u64, Efault); if (!(align & 3)) copy_to_kernel_nofault_loop(dst, src, size, u32, Efault); if (!(align & 1)) copy_to_kernel_nofault_loop(dst, src, size, u16, Efault); copy_to_kernel_nofault_loop(dst, src, size, u8, Efault); pagefault_enable(); return 0; Efault: pagefault_enable(); return -EFAULT; } long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) { const void *src = unsafe_addr; if (unlikely(count <= 0)) return 0; if (!copy_from_kernel_nofault_allowed(unsafe_addr, count)) return -ERANGE; pagefault_disable(); do { __get_kernel_nofault(dst, src, u8, Efault); dst++; src++; } while (dst[-1] && src - unsafe_addr < count); pagefault_enable(); dst[-1] = '\0'; return src - unsafe_addr; Efault: pagefault_enable(); dst[0] = '\0'; return -EFAULT; } /** * copy_from_user_nofault(): safely attempt to read from a user-space location * @dst: pointer to the buffer that shall take
the data * @src: address to read from. This must be a user address. * @size: size of the data chunk * * Safely read from user address @src to the buffer at @dst. If a kernel fault * happens, handle that and return -EFAULT. */ long copy_from_user_nofault(void *dst, const void __user *src, size_t size) { long ret = -EFAULT; if (!__access_ok(src, size)) return ret; if (!nmi_uaccess_okay()) return ret; pagefault_disable(); ret = __copy_from_user_inatomic(dst, src, size); pagefault_enable(); if (ret) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(copy_from_user_nofault); /** * copy_to_user_nofault(): safely attempt to write to a user-space location * @dst: address to write to * @src: pointer to the data that shall be written * @size: size of the data chunk * * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ long copy_to_user_nofault(void __user *dst, const void *src, size_t size) { long ret = -EFAULT; if (access_ok(dst, size)) { pagefault_disable(); ret = __copy_to_user_inatomic(dst, src, size); pagefault_enable(); } if (ret) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(copy_to_user_nofault); /** * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user * address. * @dst: Destination address, in kernel space. This buffer must be at * least @count bytes long. * @unsafe_addr: Unsafe user address. * @count: Maximum number of bytes to copy, including the trailing NUL. * * Copies a NUL-terminated string from unsafe user address to kernel buffer. * * On success, returns the length of the string INCLUDING the trailing NUL. * * If access fails, returns -EFAULT (some data may have been copied * and the trailing NUL added). * * If @count is smaller than the length of the string, copies @count-1 bytes, * sets the last byte of @dst buffer to NUL and returns @count. */ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count) { long ret; if (unlikely(count <= 0)) return 0; pagefault_disable(); ret = strncpy_from_user(dst, unsafe_addr, count); pagefault_enable(); if (ret >= count) { ret = count; dst[ret - 1] = '\0'; } else if (ret > 0) { ret++; } return ret; } /** * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL. * @unsafe_addr: The string to measure. * @count: Maximum count (including NUL) * * Get the size of a NUL-terminated string in user space without pagefault. * * Returns the size of the string INCLUDING the terminating NUL. * * If the string is too long, returns a number larger than @count. User * has to check the return value against "> count". * On exception (or invalid count), returns 0. * * Unlike strnlen_user, this can be used from IRQ handler etc. because * it disables pagefaults. */ long strnlen_user_nofault(const void __user *unsafe_addr, long count) { int ret; pagefault_disable(); ret = strnlen_user(unsafe_addr, count); pagefault_enable(); return ret; } void __copy_overflow(int size, unsigned long count) { WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); } EXPORT_SYMBOL(__copy_overflow);
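Illustration only: the *_nofault() helpers above exist for contexts such as tracing, debuggers or NMI handlers that must never take a page fault. As a hedged sketch of a typical caller, the hypothetical helper below probes a single word at an untrusted kernel address and reports failure instead of faulting.

static long peek_kernel_word(const void *addr, unsigned long *out)
{
	unsigned long val;

	/*
	 * copy_from_kernel_nofault() returns 0 on success and a negative
	 * error code (-ERANGE or -EFAULT) when the address may not or
	 * cannot be read; no page fault is taken either way.
	 */
	if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
		return -EFAULT;

	*out = val;
	return 0;
}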
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2001 Momchil Velikov * Portions Copyright (C) 2001 Christoph Hellwig * Copyright (C) 2005 SGI, Christoph Lameter * Copyright (C) 2006 Nick Piggin * Copyright (C) 2012 Konstantin Khlebnikov * Copyright (C) 2016 Intel, Matthew Wilcox * Copyright (C) 2016 Intel, Ross Zwisler */ #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/bug.h> #include <linux/cpu.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kmemleak.h> #include <linux/percpu.h> #include <linux/preempt.h> /* in_interrupt() */ #include <linux/radix-tree.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/xarray.h> #include "radix-tree.h" /* * Radix tree node cache. */ struct kmem_cache *radix_tree_node_cachep; /* * The radix tree is variable-height, so an insert operation not only has * to build the branch to its corresponding item, it also has to build the * branch to existing items if the size has to be increased (by * radix_tree_extend). * * The worst case is a zero height tree with just a single item at index 0, * and then inserting an item at index ULONG_MAX. This requires 2 new branches * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared. * Hence: */ #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) /* * The IDR does not have to be as high as the radix tree since it uses * signed integers, not unsigned longs. */ #define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1) #define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) #define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1) /* * Per-cpu pool of preloaded nodes */ DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { .lock = INIT_LOCAL_LOCK(lock), }; EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads); static inline struct radix_tree_node *entry_to_node(void *ptr) { return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); } static inline void *node_to_entry(void *ptr) { return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE); } #define RADIX_TREE_RETRY XA_RETRY_ENTRY static inline unsigned long get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) { return parent ?
slot - parent->slots : 0; } static unsigned int radix_tree_descend(const struct radix_tree_node *parent, struct radix_tree_node **nodep, unsigned long index) { unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; void __rcu **entry = rcu_dereference_raw(parent->slots[offset]); *nodep = (void *)entry; return offset; } static inline gfp_t root_gfp_mask(const struct radix_tree_root *root) { return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK); } static inline void tag_set(struct radix_tree_node *node, unsigned int tag, int offset) { __set_bit(offset, node->tags[tag]); } static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, int offset) { __clear_bit(offset, node->tags[tag]); } static inline int tag_get(const struct radix_tree_node *node, unsigned int tag, int offset) { return test_bit(offset, node->tags[tag]); } static inline void root_tag_set(struct radix_tree_root *root, unsigned tag) { root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) { root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear_all(struct radix_tree_root *root) { root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1); } static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag) { return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT)); } static inline unsigned root_tags_get(const struct radix_tree_root *root) { return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT; } static inline bool is_idr(const struct radix_tree_root *root) { return !!(root->xa_flags & ROOT_IS_IDR); } /* * Returns 1 if any slot in the node has this tag set. * Otherwise returns 0. */ static inline int any_tag_set(const struct radix_tree_node *node, unsigned int tag) { unsigned idx; for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { if (node->tags[tag][idx]) return 1; } return 0; } static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag) { bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE); } /** * radix_tree_find_next_bit - find the next set bit in a memory region * * @node: where to begin the search * @tag: the tag index * @offset: the bitnumber to start searching at * * Unrollable variant of find_next_bit() for constant size arrays. * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero. * Returns next bit offset, or size if nothing found. 
*/ static __always_inline unsigned long radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag, unsigned long offset) { const unsigned long *addr = node->tags[tag]; if (offset < RADIX_TREE_MAP_SIZE) { unsigned long tmp; addr += offset / BITS_PER_LONG; tmp = *addr >> (offset % BITS_PER_LONG); if (tmp) return __ffs(tmp) + offset; offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1); while (offset < RADIX_TREE_MAP_SIZE) { tmp = *++addr; if (tmp) return __ffs(tmp) + offset; offset += BITS_PER_LONG; } } return RADIX_TREE_MAP_SIZE; } static unsigned int iter_offset(const struct radix_tree_iter *iter) { return iter->index & RADIX_TREE_MAP_MASK; } /* * The maximum index which can be stored in a radix tree */ static inline unsigned long shift_maxindex(unsigned int shift) { return (RADIX_TREE_MAP_SIZE << shift) - 1; } static inline unsigned long node_maxindex(const struct radix_tree_node *node) { return shift_maxindex(node->shift); } static unsigned long next_index(unsigned long index, const struct radix_tree_node *node, unsigned long offset) { return (index & ~node_maxindex(node)) + (offset << node->shift); } /* * This assumes that the caller has performed appropriate preallocation, and * that the caller has pinned this thread of control to the current CPU. */ static struct radix_tree_node * radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, struct radix_tree_root *root, unsigned int shift, unsigned int offset, unsigned int count, unsigned int nr_values) { struct radix_tree_node *ret = NULL; /* * Preload code isn't irq safe and it doesn't make sense to use * preloading during an interrupt anyway as all the allocations have * to be atomic. So just do normal allocation when in interrupt. */ if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) { struct radix_tree_preload *rtp; /* * Even if the caller has preloaded, try to allocate from the * cache first for the new node to get accounted to the memory * cgroup. */ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask | __GFP_NOWARN); if (ret) goto out; /* * Provided the caller has preloaded here, we will always * succeed in getting a node here (and never reach * kmem_cache_alloc) */ rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes; rtp->nodes = ret->parent; rtp->nr--; } /* * Update the allocation stack trace as this is more useful * for debugging. */ kmemleak_update_trace(ret); goto out; } ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); out: BUG_ON(radix_tree_is_internal_node(ret)); if (ret) { ret->shift = shift; ret->offset = offset; ret->count = count; ret->nr_values = nr_values; ret->parent = parent; ret->array = root; } return ret; } void radix_tree_node_rcu_free(struct rcu_head *head) { struct radix_tree_node *node = container_of(head, struct radix_tree_node, rcu_head); /* * Must only free zeroed nodes into the slab. We can be left with * non-NULL entries by radix_tree_free_nodes, so clear the entries * and tags here. */ memset(node->slots, 0, sizeof(node->slots)); memset(node->tags, 0, sizeof(node->tags)); INIT_LIST_HEAD(&node->private_list); kmem_cache_free(radix_tree_node_cachep, node); } static inline void radix_tree_node_free(struct radix_tree_node *node) { call_rcu(&node->rcu_head, radix_tree_node_rcu_free); } /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On * success, return zero, with preemption disabled. 
On error, return -ENOMEM * with preemption not disabled. * * To make use of this facility, the radix tree must be initialised without * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). */ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) { struct radix_tree_preload *rtp; struct radix_tree_node *node; int ret = -ENOMEM; /* * Nodes preloaded by one cgroup can be used by another cgroup, so * they should never be accounted to any particular memory cgroup. */ gfp_mask &= ~__GFP_ACCOUNT; local_lock(&radix_tree_preloads.lock); rtp = this_cpu_ptr(&radix_tree_preloads); while (rtp->nr < nr) { local_unlock(&radix_tree_preloads.lock); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; local_lock(&radix_tree_preloads.lock); rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr < nr) { node->parent = rtp->nodes; rtp->nodes = node; rtp->nr++; } else { kmem_cache_free(radix_tree_node_cachep, node); } } ret = 0; out: return ret; } /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On * success, return zero, with preemption disabled. On error, return -ENOMEM * with preemption not disabled. * * To make use of this facility, the radix tree must be initialised without * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). */ int radix_tree_preload(gfp_t gfp_mask) { /* Warn on non-sensical use... */ WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); } EXPORT_SYMBOL(radix_tree_preload); /* * The same as above function, except we don't guarantee preloading happens. * We do it, if we decide it helps. On success, return zero with preemption * disabled. On error, return -ENOMEM with preemption not disabled. */ int radix_tree_maybe_preload(gfp_t gfp_mask) { if (gfpflags_allow_blocking(gfp_mask)) return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); /* Preloading doesn't help anything with this gfp mask, skip it */ local_lock(&radix_tree_preloads.lock); return 0; } EXPORT_SYMBOL(radix_tree_maybe_preload); static unsigned radix_tree_load_root(const struct radix_tree_root *root, struct radix_tree_node **nodep, unsigned long *maxindex) { struct radix_tree_node *node = rcu_dereference_raw(root->xa_head); *nodep = node; if (likely(radix_tree_is_internal_node(node))) { node = entry_to_node(node); *maxindex = node_maxindex(node); return node->shift + RADIX_TREE_MAP_SHIFT; } *maxindex = 0; return 0; } /* * Extend a radix tree so it can store key @index. */ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, unsigned long index, unsigned int shift) { void *entry; unsigned int maxshift; int tag; /* Figure out what the shift should be. 
*/ maxshift = shift; while (index > shift_maxindex(maxshift)) maxshift += RADIX_TREE_MAP_SHIFT; entry = rcu_dereference_raw(root->xa_head); if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE))) goto out; do { struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL, root, shift, 0, 1, 0); if (!node) return -ENOMEM; if (is_idr(root)) { all_tag_set(node, IDR_FREE); if (!root_tag_get(root, IDR_FREE)) { tag_clear(node, IDR_FREE, 0); root_tag_set(root, IDR_FREE); } } else { /* Propagate the aggregated tag info to the new child */ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { if (root_tag_get(root, tag)) tag_set(node, tag, 0); } } BUG_ON(shift > BITS_PER_LONG); if (radix_tree_is_internal_node(entry)) { entry_to_node(entry)->parent = node; } else if (xa_is_value(entry)) { /* Moving a value entry root->xa_head to a node */ node->nr_values = 1; } /* * entry was already in the radix tree, so we do not need * rcu_assign_pointer here */ node->slots[0] = (void __rcu *)entry; entry = node_to_entry(node); rcu_assign_pointer(root->xa_head, entry); shift += RADIX_TREE_MAP_SHIFT; } while (shift <= maxshift); out: return maxshift + RADIX_TREE_MAP_SHIFT; } /** * radix_tree_shrink - shrink radix tree to minimum height * @root: radix tree root */ static inline bool radix_tree_shrink(struct radix_tree_root *root) { bool shrunk = false; for (;;) { struct radix_tree_node *node = rcu_dereference_raw(root->xa_head); struct radix_tree_node *child; if (!radix_tree_is_internal_node(node)) break; node = entry_to_node(node); /* * The candidate node has more than one child, or its child * is not at the leftmost slot, we cannot shrink. */ if (node->count != 1) break; child = rcu_dereference_raw(node->slots[0]); if (!child) break; /* * For an IDR, we must not shrink entry 0 into the root in * case somebody calls idr_replace() with a pointer that * appears to be an internal entry */ if (!node->shift && is_idr(root)) break; if (radix_tree_is_internal_node(child)) entry_to_node(child)->parent = NULL; /* * We don't need rcu_assign_pointer(), since we are simply * moving the node from one part of the tree to another: if it * was safe to dereference the old pointer to it * (node->slots[0]), it will be safe to dereference the new * one (root->xa_head) as far as dependent read barriers go. */ root->xa_head = (void __rcu *)child; if (is_idr(root) && !tag_get(node, IDR_FREE, 0)) root_tag_clear(root, IDR_FREE); /* * We have a dilemma here. The node's slot[0] must not be * NULLed in case there are concurrent lookups expecting to * find the item. However if this was a bottom-level node, * then it may be subject to the slot pointer being visible * to callers dereferencing it. If item corresponding to * slot[0] is subsequently deleted, these callers would expect * their slot to become empty sooner or later. * * For example, lockless pagecache will look up a slot, deref * the page pointer, and if the page has 0 refcount it means it * was concurrently deleted from pagecache so try the deref * again. Fortunately there is already a requirement for logic * to retry the entire slot lookup -- the indirect pointer * problem (replacing direct root node with an indirect pointer * also results in a stale slot). So tag the slot as indirect * to force callers to retry. 
*/ node->count = 0; if (!radix_tree_is_internal_node(child)) { node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; } WARN_ON_ONCE(!list_empty(&node->private_list)); radix_tree_node_free(node); shrunk = true; } return shrunk; } static bool delete_node(struct radix_tree_root *root, struct radix_tree_node *node) { bool deleted = false; do { struct radix_tree_node *parent; if (node->count) { if (node_to_entry(node) == rcu_dereference_raw(root->xa_head)) deleted |= radix_tree_shrink(root); return deleted; } parent = node->parent; if (parent) { parent->slots[node->offset] = NULL; parent->count--; } else { /* * Shouldn't the tags already have all been cleared * by the caller? */ if (!is_idr(root)) root_tag_clear_all(root); root->xa_head = NULL; } WARN_ON_ONCE(!list_empty(&node->private_list)); radix_tree_node_free(node); deleted = true; node = parent; } while (node); return deleted; } /** * __radix_tree_create - create a slot in a radix tree * @root: radix tree root * @index: index key * @nodep: returns node * @slotp: returns slot * * Create, if necessary, and return the node and slot for an item * at position @index in the radix tree @root. * * Until there is more than one item in the tree, no nodes are * allocated and @root->xa_head is used as a direct slot instead of * pointing to a node, in which case *@nodep will be NULL. * * Returns -ENOMEM, or 0 for success. */ static int __radix_tree_create(struct radix_tree_root *root, unsigned long index, struct radix_tree_node **nodep, void __rcu ***slotp) { struct radix_tree_node *node = NULL, *child; void __rcu **slot = (void __rcu **)&root->xa_head; unsigned long maxindex; unsigned int shift, offset = 0; unsigned long max = index; gfp_t gfp = root_gfp_mask(root); shift = radix_tree_load_root(root, &child, &maxindex); /* Make sure the tree is high enough. */ if (max > maxindex) { int error = radix_tree_extend(root, gfp, max, shift); if (error < 0) return error; shift = error; child = rcu_dereference_raw(root->xa_head); } while (shift > 0) { shift -= RADIX_TREE_MAP_SHIFT; if (child == NULL) { /* Have to add a child node. */ child = radix_tree_node_alloc(gfp, node, root, shift, offset, 0, 0); if (!child) return -ENOMEM; rcu_assign_pointer(*slot, node_to_entry(child)); if (node) node->count++; } else if (!radix_tree_is_internal_node(child)) break; /* Go a level down */ node = entry_to_node(child); offset = radix_tree_descend(node, &child, index); slot = &node->slots[offset]; } if (nodep) *nodep = node; if (slotp) *slotp = slot; return 0; } /* * Free any nodes below this node. The tree is presumed to not need * shrinking, and any user data in the tree is presumed to not need a * destructor called on it. If we need to add a destructor, we can * add that functionality later. Note that we may not clear tags or * slots from the tree as an RCU walker may still have a pointer into * this subtree. We could replace the entries with RADIX_TREE_RETRY, * but we'll still have to clear those in rcu_free. 
*/ static void radix_tree_free_nodes(struct radix_tree_node *node) { unsigned offset = 0; struct radix_tree_node *child = entry_to_node(node); for (;;) { void *entry = rcu_dereference_raw(child->slots[offset]); if (xa_is_node(entry) && child->shift) { child = entry_to_node(entry); offset = 0; continue; } offset++; while (offset == RADIX_TREE_MAP_SIZE) { struct radix_tree_node *old = child; offset = child->offset + 1; child = child->parent; WARN_ON_ONCE(!list_empty(&old->private_list)); radix_tree_node_free(old); if (old == entry_to_node(node)) return; } } } static inline int insert_entries(struct radix_tree_node *node, void __rcu **slot, void *item) { if (*slot) return -EEXIST; rcu_assign_pointer(*slot, item); if (node) { node->count++; if (xa_is_value(item)) node->nr_values++; } return 1; } /** * radix_tree_insert - insert into a radix tree * @root: radix tree root * @index: index key * @item: item to insert * * Insert an item into the radix tree at position @index. */ int radix_tree_insert(struct radix_tree_root *root, unsigned long index, void *item) { struct radix_tree_node *node; void __rcu **slot; int error; BUG_ON(radix_tree_is_internal_node(item)); error = __radix_tree_create(root, index, &node, &slot); if (error) return error; error = insert_entries(node, slot, item); if (error < 0) return error; if (node) { unsigned offset = get_slot_offset(node, slot); BUG_ON(tag_get(node, 0, offset)); BUG_ON(tag_get(node, 1, offset)); BUG_ON(tag_get(node, 2, offset)); } else { BUG_ON(root_tags_get(root)); } return 0; } EXPORT_SYMBOL(radix_tree_insert); /** * __radix_tree_lookup - lookup an item in a radix tree * @root: radix tree root * @index: index key * @nodep: returns node * @slotp: returns slot * * Lookup and return the item at position @index in the radix * tree @root. * * Until there is more than one item in the tree, no nodes are * allocated and @root->xa_head is used as a direct slot instead of * pointing to a node, in which case *@nodep will be NULL. */ void *__radix_tree_lookup(const struct radix_tree_root *root, unsigned long index, struct radix_tree_node **nodep, void __rcu ***slotp) { struct radix_tree_node *node, *parent; unsigned long maxindex; void __rcu **slot; restart: parent = NULL; slot = (void __rcu **)&root->xa_head; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return NULL; while (radix_tree_is_internal_node(node)) { unsigned offset; parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); slot = parent->slots + offset; if (node == RADIX_TREE_RETRY) goto restart; if (parent->shift == 0) break; } if (nodep) *nodep = parent; if (slotp) *slotp = slot; return node; } /** * radix_tree_lookup_slot - lookup a slot in a radix tree * @root: radix tree root * @index: index key * * Returns: the slot corresponding to the position @index in the * radix tree @root. This is useful for update-if-exists operations. * * This function can be called under rcu_read_lock iff the slot is not * modified by radix_tree_replace_slot, otherwise it must be called * exclusive from other writers. Any dereference of the slot must be done * using radix_tree_deref_slot. 
*/ void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root, unsigned long index) { void __rcu **slot; if (!__radix_tree_lookup(root, index, NULL, &slot)) return NULL; return slot; } EXPORT_SYMBOL(radix_tree_lookup_slot); /** * radix_tree_lookup - perform lookup operation on a radix tree * @root: radix tree root * @index: index key * * Lookup the item at the position @index in the radix tree @root. * * This function can be called under rcu_read_lock, however the caller * must manage lifetimes of leaf nodes (eg. RCU may also be used to free * them safely). No RCU barriers are required to access or modify the * returned item, however. */ void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) { return __radix_tree_lookup(root, index, NULL, NULL); } EXPORT_SYMBOL(radix_tree_lookup); static void replace_slot(void __rcu **slot, void *item, struct radix_tree_node *node, int count, int values) { if (node && (count || values)) { node->count += count; node->nr_values += values; } rcu_assign_pointer(*slot, item); } static bool node_tag_get(const struct radix_tree_root *root, const struct radix_tree_node *node, unsigned int tag, unsigned int offset) { if (node) return tag_get(node, tag, offset); return root_tag_get(root, tag); } /* * IDR users want to be able to store NULL in the tree, so if the slot isn't * free, don't adjust the count, even if it's transitioning between NULL and * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still * have empty bits, but it only stores NULL in slots when they're being * deleted. */ static int calculate_count(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot, void *item, void *old) { if (is_idr(root)) { unsigned offset = get_slot_offset(node, slot); bool free = node_tag_get(root, node, IDR_FREE, offset); if (!free) return 0; if (!old) return 1; } return !!item - !!old; } /** * __radix_tree_replace - replace item in a slot * @root: radix tree root * @node: pointer to tree node * @slot: pointer to slot in @node * @item: new item to store in the slot. * * For use with __radix_tree_lookup(). Caller must hold tree write locked * across slot lookup and replacement. */ void __radix_tree_replace(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot, void *item) { void *old = rcu_dereference_raw(*slot); int values = !!xa_is_value(item) - !!xa_is_value(old); int count = calculate_count(root, node, slot, item, old); /* * This function supports replacing value entries and * deleting entries, but that needs accounting against the * node unless the slot is root->xa_head. */ WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) && (count || values)); replace_slot(slot, item, node, count, values); if (!node) return; delete_node(root, node); } /** * radix_tree_replace_slot - replace item in a slot * @root: radix tree root * @slot: pointer to slot * @item: new item to store in the slot. * * For use with radix_tree_lookup_slot() and * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked * across slot lookup and replacement. * * NOTE: This cannot be used to switch between non-entries (empty slots), * regular entries, and value entries, as that requires accounting * inside the radix tree node. When switching from one type of entry or * deleting, use __radix_tree_lookup() and __radix_tree_replace() or * radix_tree_iter_replace(). 
*/ void radix_tree_replace_slot(struct radix_tree_root *root, void __rcu **slot, void *item) { __radix_tree_replace(root, NULL, slot, item); } EXPORT_SYMBOL(radix_tree_replace_slot); /** * radix_tree_iter_replace - replace item in a slot * @root: radix tree root * @iter: iterator state * @slot: pointer to slot * @item: new item to store in the slot. * * For use with radix_tree_for_each_slot(). * Caller must hold tree write locked. */ void radix_tree_iter_replace(struct radix_tree_root *root, const struct radix_tree_iter *iter, void __rcu **slot, void *item) { __radix_tree_replace(root, iter->node, slot, item); } static void node_tag_set(struct radix_tree_root *root, struct radix_tree_node *node, unsigned int tag, unsigned int offset) { while (node) { if (tag_get(node, tag, offset)) return; tag_set(node, tag, offset); offset = node->offset; node = node->parent; } if (!root_tag_get(root, tag)) root_tag_set(root, tag); } /** * radix_tree_tag_set - set a tag on a radix tree node * @root: radix tree root * @index: index key * @tag: tag index * * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) * corresponding to @index in the radix tree. From * the root all the way down to the leaf node. * * Returns the address of the tagged item. Setting a tag on a not-present * item is a bug. */ void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag) { struct radix_tree_node *node, *parent; unsigned long maxindex; radix_tree_load_root(root, &node, &maxindex); BUG_ON(index > maxindex); while (radix_tree_is_internal_node(node)) { unsigned offset; parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); BUG_ON(!node); if (!tag_get(parent, tag, offset)) tag_set(parent, tag, offset); } /* set the root's tag bit */ if (!root_tag_get(root, tag)) root_tag_set(root, tag); return node; } EXPORT_SYMBOL(radix_tree_tag_set); static void node_tag_clear(struct radix_tree_root *root, struct radix_tree_node *node, unsigned int tag, unsigned int offset) { while (node) { if (!tag_get(node, tag, offset)) return; tag_clear(node, tag, offset); if (any_tag_set(node, tag)) return; offset = node->offset; node = node->parent; } /* clear the root's tag bit */ if (root_tag_get(root, tag)) root_tag_clear(root, tag); } /** * radix_tree_tag_clear - clear a tag on a radix tree node * @root: radix tree root * @index: index key * @tag: tag index * * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) * corresponding to @index in the radix tree. If this causes * the leaf node to have no tags set then clear the tag in the * next-to-leaf node, etc. * * Returns the address of the tagged item on success, else NULL. ie: * has the same return value and semantics as radix_tree_lookup(). 
*/ void *radix_tree_tag_clear(struct radix_tree_root *root, unsigned long index, unsigned int tag) { struct radix_tree_node *node, *parent; unsigned long maxindex; int offset = 0; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return NULL; parent = NULL; while (radix_tree_is_internal_node(node)) { parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); } if (node) node_tag_clear(root, parent, tag, offset); return node; } EXPORT_SYMBOL(radix_tree_tag_clear); /** * radix_tree_iter_tag_clear - clear a tag on the current iterator entry * @root: radix tree root * @iter: iterator state * @tag: tag to clear */ void radix_tree_iter_tag_clear(struct radix_tree_root *root, const struct radix_tree_iter *iter, unsigned int tag) { node_tag_clear(root, iter->node, tag, iter_offset(iter)); } /** * radix_tree_tag_get - get a tag on a radix tree node * @root: radix tree root * @index: index key * @tag: tag index (< RADIX_TREE_MAX_TAGS) * * Return values: * * 0: tag not present or not set * 1: tag set * * Note that the return value of this function may not be relied on, even if * the RCU lock is held, unless tag modification and node deletion are excluded * from concurrency. */ int radix_tree_tag_get(const struct radix_tree_root *root, unsigned long index, unsigned int tag) { struct radix_tree_node *node, *parent; unsigned long maxindex; if (!root_tag_get(root, tag)) return 0; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return 0; while (radix_tree_is_internal_node(node)) { unsigned offset; parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); if (!tag_get(parent, tag, offset)) return 0; if (node == RADIX_TREE_RETRY) break; } return 1; } EXPORT_SYMBOL(radix_tree_tag_get); /* Construct iter->tags bit-mask from node->tags[tag] array */ static void set_iter_tags(struct radix_tree_iter *iter, struct radix_tree_node *node, unsigned offset, unsigned tag) { unsigned tag_long = offset / BITS_PER_LONG; unsigned tag_bit = offset % BITS_PER_LONG; if (!node) { iter->tags = 1; return; } iter->tags = node->tags[tag][tag_long] >> tag_bit; /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ if (tag_long < RADIX_TREE_TAG_LONGS - 1) { /* Pick tags from next element */ if (tag_bit) iter->tags |= node->tags[tag][tag_long + 1] << (BITS_PER_LONG - tag_bit); /* Clip chunk size, here only BITS_PER_LONG tags */ iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG); } } void __rcu **radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter) { iter->index = __radix_tree_iter_add(iter, 1); iter->next_index = iter->index; iter->tags = 0; return NULL; } EXPORT_SYMBOL(radix_tree_iter_resume); /** * radix_tree_next_chunk - find next chunk of slots for iteration * * @root: radix tree root * @iter: iterator state * @flags: RADIX_TREE_ITER_* flags and tag index * Returns: pointer to chunk first slot, or NULL if iteration is over */ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned flags) { unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; struct radix_tree_node *node, *child; unsigned long index, offset, maxindex; if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) return NULL; /* * Catch next_index overflow after ~0UL. iter->index never overflows * during iterating; it can be zero only at the beginning. * And we cannot overflow iter->next_index in a single step, * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. 
* * This condition also used by radix_tree_next_slot() to stop * contiguous iterating, and forbid switching to the next chunk. */ index = iter->next_index; if (!index && iter->index) return NULL; restart: radix_tree_load_root(root, &child, &maxindex); if (index > maxindex) return NULL; if (!child) return NULL; if (!radix_tree_is_internal_node(child)) { /* Single-slot tree */ iter->index = index; iter->next_index = maxindex + 1; iter->tags = 1; iter->node = NULL; return (void __rcu **)&root->xa_head; } do { node = entry_to_node(child); offset = radix_tree_descend(node, &child, index); if ((flags & RADIX_TREE_ITER_TAGGED) ? !tag_get(node, tag, offset) : !child) { /* Hole detected */ if (flags & RADIX_TREE_ITER_CONTIG) return NULL; if (flags & RADIX_TREE_ITER_TAGGED) offset = radix_tree_find_next_bit(node, tag, offset + 1); else while (++offset < RADIX_TREE_MAP_SIZE) { void *slot = rcu_dereference_raw( node->slots[offset]); if (slot) break; } index &= ~node_maxindex(node); index += offset << node->shift; /* Overflow after ~0UL */ if (!index) return NULL; if (offset == RADIX_TREE_MAP_SIZE) goto restart; child = rcu_dereference_raw(node->slots[offset]); } if (!child) goto restart; if (child == RADIX_TREE_RETRY) break; } while (node->shift && radix_tree_is_internal_node(child)); /* Update the iterator state */ iter->index = (index &~ node_maxindex(node)) | offset; iter->next_index = (index | node_maxindex(node)) + 1; iter->node = node; if (flags & RADIX_TREE_ITER_TAGGED) set_iter_tags(iter, node, offset, tag); return node->slots + offset; } EXPORT_SYMBOL(radix_tree_next_chunk); /** * radix_tree_gang_lookup - perform multiple lookup on a radix tree * @root: radix tree root * @results: where the results of the lookup are placed * @first_index: start the lookup from this key * @max_items: place up to this many items at *results * * Performs an index-ascending scan of the tree for present items. Places * them at *@results and returns the number of items which were placed at * *@results. * * The implementation is naive. * * Like radix_tree_lookup, radix_tree_gang_lookup may be called under * rcu_read_lock. In this case, rather than the returned results being * an atomic snapshot of the tree at a single point in time, the * semantics of an RCU protected gang lookup are as though multiple * radix_tree_lookups have been issued in individual locks, and results * stored in 'results'. */ unsigned int radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items) { struct radix_tree_iter iter; void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) return 0; radix_tree_for_each_slot(slot, root, &iter, first_index) { results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; if (radix_tree_is_internal_node(results[ret])) { slot = radix_tree_iter_retry(&iter); continue; } if (++ret == max_items) break; } return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup); /** * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree * based on a tag * @root: radix tree root * @results: where the results of the lookup are placed * @first_index: start the lookup from this key * @max_items: place up to this many items at *results * @tag: the tag index (< RADIX_TREE_MAX_TAGS) * * Performs an index-ascending scan of the tree for present items which * have the tag indexed by @tag set. Places the items at *@results and * returns the number of items which were placed at *@results. 
*/ unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag) { struct radix_tree_iter iter; void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) return 0; radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; if (radix_tree_is_internal_node(results[ret])) { slot = radix_tree_iter_retry(&iter); continue; } if (++ret == max_items) break; } return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup_tag); /** * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a * radix tree based on a tag * @root: radix tree root * @results: where the results of the lookup are placed * @first_index: start the lookup from this key * @max_items: place up to this many items at *results * @tag: the tag index (< RADIX_TREE_MAX_TAGS) * * Performs an index-ascending scan of the tree for present items which * have the tag indexed by @tag set. Places the slots at *@results and * returns the number of slots which were placed at *@results. */ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, void __rcu ***results, unsigned long first_index, unsigned int max_items, unsigned int tag) { struct radix_tree_iter iter; void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) return 0; radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { results[ret] = slot; if (++ret == max_items) break; } return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); static bool __radix_tree_delete(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot) { void *old = rcu_dereference_raw(*slot); int values = xa_is_value(old) ? -1 : 0; unsigned offset = get_slot_offset(node, slot); int tag; if (is_idr(root)) node_tag_set(root, node, IDR_FREE, offset); else for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) node_tag_clear(root, node, tag, offset); replace_slot(slot, NULL, node, -1, values); return node && delete_node(root, node); } /** * radix_tree_iter_delete - delete the entry at this iterator position * @root: radix tree root * @iter: iterator state * @slot: pointer to slot * * Delete the entry at the position currently pointed to by the iterator. * This may result in the current node being freed; if it is, the iterator * is advanced so that it will not reference the freed memory. This * function may be called without any locking if there are no other threads * which can access this tree. */ void radix_tree_iter_delete(struct radix_tree_root *root, struct radix_tree_iter *iter, void __rcu **slot) { if (__radix_tree_delete(root, iter->node, slot)) iter->index = iter->next_index; } EXPORT_SYMBOL(radix_tree_iter_delete); /** * radix_tree_delete_item - delete an item from a radix tree * @root: radix tree root * @index: index key * @item: expected item * * Remove @item at @index from the radix tree rooted at @root. * * Return: the deleted entry, or %NULL if it was not present * or the entry at the given @index was not @item. 
*/ void *radix_tree_delete_item(struct radix_tree_root *root, unsigned long index, void *item) { struct radix_tree_node *node = NULL; void __rcu **slot = NULL; void *entry; entry = __radix_tree_lookup(root, index, &node, &slot); if (!slot) return NULL; if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, get_slot_offset(node, slot)))) return NULL; if (item && entry != item) return NULL; __radix_tree_delete(root, node, slot); return entry; } EXPORT_SYMBOL(radix_tree_delete_item); /** * radix_tree_delete - delete an entry from a radix tree * @root: radix tree root * @index: index key * * Remove the entry at @index from the radix tree rooted at @root. * * Return: The deleted entry, or %NULL if it was not present. */ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) { return radix_tree_delete_item(root, index, NULL); } EXPORT_SYMBOL(radix_tree_delete); /** * radix_tree_tagged - test whether any items in the tree are tagged * @root: radix tree root * @tag: tag to test */ int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag) { return root_tag_get(root, tag); } EXPORT_SYMBOL(radix_tree_tagged); /** * idr_preload - preload for idr_alloc() * @gfp_mask: allocation mask to use for preloading * * Preallocate memory to use for the next call to idr_alloc(). This function * returns with preemption disabled. It will be enabled by idr_preload_end(). */ void idr_preload(gfp_t gfp_mask) { if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) local_lock(&radix_tree_preloads.lock); } EXPORT_SYMBOL(idr_preload); void __rcu **idr_get_free(struct radix_tree_root *root, struct radix_tree_iter *iter, gfp_t gfp, unsigned long max) { struct radix_tree_node *node = NULL, *child; void __rcu **slot = (void __rcu **)&root->xa_head; unsigned long maxindex, start = iter->next_index; unsigned int shift, offset = 0; grow: shift = radix_tree_load_root(root, &child, &maxindex); if (!radix_tree_tagged(root, IDR_FREE)) start = max(start, maxindex + 1); if (start > max) return ERR_PTR(-ENOSPC); if (start > maxindex) { int error = radix_tree_extend(root, gfp, start, shift); if (error < 0) return ERR_PTR(error); shift = error; child = rcu_dereference_raw(root->xa_head); } if (start == 0 && shift == 0) shift = RADIX_TREE_MAP_SHIFT; while (shift) { shift -= RADIX_TREE_MAP_SHIFT; if (child == NULL) { /* Have to add a child node. */ child = radix_tree_node_alloc(gfp, node, root, shift, offset, 0, 0); if (!child) return ERR_PTR(-ENOMEM); all_tag_set(child, IDR_FREE); rcu_assign_pointer(*slot, node_to_entry(child)); if (node) node->count++; } else if (!radix_tree_is_internal_node(child)) break; node = entry_to_node(child); offset = radix_tree_descend(node, &child, start); if (!tag_get(node, IDR_FREE, offset)) { offset = radix_tree_find_next_bit(node, IDR_FREE, offset + 1); start = next_index(start, node, offset); if (start > max || start == 0) return ERR_PTR(-ENOSPC); while (offset == RADIX_TREE_MAP_SIZE) { offset = node->offset + 1; node = node->parent; if (!node) goto grow; shift = node->shift; } child = rcu_dereference_raw(node->slots[offset]); } slot = &node->slots[offset]; } iter->index = start; if (node) iter->next_index = 1 + min(max, (start | node_maxindex(node))); else iter->next_index = 1; iter->node = node; set_iter_tags(iter, node, offset, IDR_FREE); return slot; } /** * idr_destroy - release all internal memory from an IDR * @idr: idr handle * * After this function is called, the IDR is empty, and may be reused or * the data structure containing it may be freed. 
* * A typical clean-up sequence for objects stored in an idr tree will use * idr_for_each() to free all objects, if necessary, then idr_destroy() to * free the memory used to keep track of those objects. */ void idr_destroy(struct idr *idr) { struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head); if (radix_tree_is_internal_node(node)) radix_tree_free_nodes(node); idr->idr_rt.xa_head = NULL; root_tag_set(&idr->idr_rt, IDR_FREE); } EXPORT_SYMBOL(idr_destroy); static void radix_tree_node_ctor(void *arg) { struct radix_tree_node *node = arg; memset(node, 0, sizeof(*node)); INIT_LIST_HEAD(&node->private_list); } static int radix_tree_cpu_dead(unsigned int cpu) { struct radix_tree_preload *rtp; struct radix_tree_node *node; /* Free per-cpu pool of preloaded nodes */ rtp = &per_cpu(radix_tree_preloads, cpu); while (rtp->nr) { node = rtp->nodes; rtp->nodes = node->parent; kmem_cache_free(radix_tree_node_cachep, node); rtp->nr--; } return 0; } void __init radix_tree_init(void) { int ret; BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK); BUILD_BUG_ON(XA_CHUNK_SIZE > 255); radix_tree_node_cachep = kmem_cache_create("radix_tree_node", sizeof(struct radix_tree_node), 0, SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, radix_tree_node_ctor); ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", NULL, radix_tree_cpu_dead); WARN_ON(ret < 0); }
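/*
 * Editor's illustration (not part of lib/radix-tree.c): a minimal sketch of
 * the idr_preload()/idr_alloc() pattern and of the clean-up sequence that the
 * idr_destroy() comment above describes (idr_for_each() to free the objects,
 * then idr_destroy() to free the tracking structures). The names my_objs,
 * my_objs_lock, struct my_obj and free_my_obj are hypothetical.
 */
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_objs);
static DEFINE_SPINLOCK(my_objs_lock);

struct my_obj {
	int id;
};

static int my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int id;

	if (!obj)
		return -ENOMEM;

	/* Preallocate nodes; see the idr_preload() comment above. */
	idr_preload(GFP_KERNEL);
	spin_lock(&my_objs_lock);
	id = idr_alloc(&my_objs, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_objs_lock);
	idr_preload_end();

	if (id < 0) {
		kfree(obj);
		return id;
	}
	obj->id = id;
	return id;
}

static int free_my_obj(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void my_objs_teardown(void)
{
	/* Free every object, then release the IDR bookkeeping itself. */
	idr_for_each(&my_objs, free_my_obj, NULL);
	idr_destroy(&my_objs);
}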
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vimc-debayer.c Virtual Media Controller Driver
 *
 * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
 */

#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>

#include "vimc-common.h"

/* TODO: Add support for more output formats, we only support RGB888 for now. */
#define VIMC_DEBAYER_SOURCE_MBUS_FMT MEDIA_BUS_FMT_RGB888_1X24

enum vimc_debayer_rgb_colors {
	VIMC_DEBAYER_RED = 0,
	VIMC_DEBAYER_GREEN = 1,
	VIMC_DEBAYER_BLUE = 2,
};

struct vimc_debayer_pix_map {
	u32 code;
	enum vimc_debayer_rgb_colors order[2][2];
};

struct vimc_debayer_device {
	struct vimc_ent_device ved;
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
	struct media_pad pads[2];
	u8 *src_frame;
	void (*set_rgb_src)(struct vimc_debayer_device *vdebayer,
			    unsigned int lin, unsigned int col,
			    unsigned int rgb[3]);
	/*
	 * Virtual "hardware" configuration, filled when the stream starts or
	 * when controls are set.
*/ struct { const struct vimc_debayer_pix_map *sink_pix_map; unsigned int sink_bpp; struct v4l2_area size; unsigned int mean_win_size; u32 src_code; } hw; }; static const struct v4l2_mbus_framefmt sink_fmt_default = { .width = 640, .height = 480, .code = MEDIA_BUS_FMT_SRGGB8_1X8, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_SRGB, }; static const u32 vimc_debayer_src_mbus_codes[] = { MEDIA_BUS_FMT_GBR888_1X24, MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_BGR888_3X8, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_RGB888_2X12_BE, MEDIA_BUS_FMT_RGB888_2X12_LE, MEDIA_BUS_FMT_RGB888_3X8, MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, MEDIA_BUS_FMT_RGB888_1X32_PADHI, }; static const struct vimc_debayer_pix_map vimc_debayer_pix_map_list[] = { { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .order = { { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN }, { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED } } }, { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE }, { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN } } }, { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED }, { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN } } }, { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .order = { { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN }, { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE } } }, { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .order = { { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN }, { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED } } }, { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE }, { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN } } }, { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED }, { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN } } }, { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .order = { { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN }, { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE } } }, { .code = MEDIA_BUS_FMT_SBGGR12_1X12, .order = { { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN }, { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED } } }, { .code = MEDIA_BUS_FMT_SGBRG12_1X12, .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE }, { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN } } }, { .code = MEDIA_BUS_FMT_SGRBG12_1X12, .order = { { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_RED }, { VIMC_DEBAYER_BLUE, VIMC_DEBAYER_GREEN } } }, { .code = MEDIA_BUS_FMT_SRGGB12_1X12, .order = { { VIMC_DEBAYER_RED, VIMC_DEBAYER_GREEN }, { VIMC_DEBAYER_GREEN, VIMC_DEBAYER_BLUE } } }, }; static const struct vimc_debayer_pix_map *vimc_debayer_pix_map_by_code(u32 code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(vimc_debayer_pix_map_list); i++) if (vimc_debayer_pix_map_list[i].code == code) return &vimc_debayer_pix_map_list[i]; return NULL; } static bool vimc_debayer_src_code_is_valid(u32 code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(vimc_debayer_src_mbus_codes); i++) if (vimc_debayer_src_mbus_codes[i] == code) return true; return false; } static int vimc_debayer_init_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state) { struct v4l2_mbus_framefmt *mf; mf = v4l2_subdev_state_get_format(sd_state, 0); *mf = sink_fmt_default; mf = v4l2_subdev_state_get_format(sd_state, 1); *mf = sink_fmt_default; mf->code = VIMC_DEBAYER_SOURCE_MBUS_FMT; return 0; } static int vimc_debayer_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (VIMC_IS_SRC(code->pad)) { if (code->index >= ARRAY_SIZE(vimc_debayer_src_mbus_codes)) return -EINVAL; code->code = vimc_debayer_src_mbus_codes[code->index]; } else { if (code->index >= 
ARRAY_SIZE(vimc_debayer_pix_map_list)) return -EINVAL; code->code = vimc_debayer_pix_map_list[code->index].code; } return 0; } static int vimc_debayer_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index) return -EINVAL; if (VIMC_IS_SINK(fse->pad)) { const struct vimc_debayer_pix_map *vpix = vimc_debayer_pix_map_by_code(fse->code); if (!vpix) return -EINVAL; } else if (!vimc_debayer_src_code_is_valid(fse->code)) { return -EINVAL; } fse->min_width = VIMC_FRAME_MIN_WIDTH; fse->max_width = VIMC_FRAME_MAX_WIDTH; fse->min_height = VIMC_FRAME_MIN_HEIGHT; fse->max_height = VIMC_FRAME_MAX_HEIGHT; return 0; } static void vimc_debayer_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt) { const struct vimc_debayer_pix_map *vpix; /* Don't accept a code that is not on the debayer table */ vpix = vimc_debayer_pix_map_by_code(fmt->code); if (!vpix) fmt->code = sink_fmt_default.code; fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH, VIMC_FRAME_MAX_WIDTH) & ~1; fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT, VIMC_FRAME_MAX_HEIGHT) & ~1; if (fmt->field == V4L2_FIELD_ANY) fmt->field = sink_fmt_default.field; vimc_colorimetry_clamp(fmt); } static int vimc_debayer_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct vimc_debayer_device *vdebayer = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *format; /* Do not change the format while stream is on. */ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE && vdebayer->src_frame) return -EBUSY; /* * Do not change the format of the source pad, it is propagated from * the sink. */ if (VIMC_IS_SRC(fmt->pad)) return v4l2_subdev_get_fmt(sd, sd_state, fmt); /* Set the new format in the sink pad. */ vimc_debayer_adjust_sink_fmt(&fmt->format); format = v4l2_subdev_state_get_format(sd_state, 0); dev_dbg(vdebayer->ved.dev, "%s: sink format update: " "old:%dx%d (0x%x, %d, %d, %d, %d) " "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdebayer->sd.name, /* old */ format->width, format->height, format->code, format->colorspace, format->quantization, format->xfer_func, format->ycbcr_enc, /* new */ fmt->format.width, fmt->format.height, fmt->format.code, fmt->format.colorspace, fmt->format.quantization, fmt->format.xfer_func, fmt->format.ycbcr_enc); *format = fmt->format; /* Propagate the format to the source pad. 
*/ format = v4l2_subdev_state_get_format(sd_state, 1); *format = fmt->format; format->code = VIMC_DEBAYER_SOURCE_MBUS_FMT; return 0; } static const struct v4l2_subdev_pad_ops vimc_debayer_pad_ops = { .enum_mbus_code = vimc_debayer_enum_mbus_code, .enum_frame_size = vimc_debayer_enum_frame_size, .get_fmt = v4l2_subdev_get_fmt, .set_fmt = vimc_debayer_set_fmt, }; static void vimc_debayer_process_rgb_frame(struct vimc_debayer_device *vdebayer, unsigned int lin, unsigned int col, unsigned int rgb[3]) { const struct vimc_pix_map *vpix; unsigned int i, index; vpix = vimc_pix_map_by_code(vdebayer->hw.src_code); index = VIMC_FRAME_INDEX(lin, col, vdebayer->hw.size.width, 3); for (i = 0; i < 3; i++) { switch (vpix->pixelformat) { case V4L2_PIX_FMT_RGB24: vdebayer->src_frame[index + i] = rgb[i]; break; case V4L2_PIX_FMT_BGR24: vdebayer->src_frame[index + i] = rgb[2 - i]; break; } } } static int vimc_debayer_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_debayer_device *vdebayer = v4l2_get_subdevdata(sd); if (enable) { const struct v4l2_mbus_framefmt *sink_fmt; const struct v4l2_mbus_framefmt *src_fmt; struct v4l2_subdev_state *state; const struct vimc_pix_map *vpix; unsigned int frame_size; if (vdebayer->src_frame) return 0; state = v4l2_subdev_lock_and_get_active_state(sd); sink_fmt = v4l2_subdev_state_get_format(state, 0); src_fmt = v4l2_subdev_state_get_format(state, 1); /* Calculate the frame size of the source pad */ vpix = vimc_pix_map_by_code(src_fmt->code); frame_size = src_fmt->width * src_fmt->height * vpix->bpp; /* Save the bytes per pixel of the sink */ vpix = vimc_pix_map_by_code(sink_fmt->code); vdebayer->hw.sink_bpp = vpix->bpp; /* Get the corresponding pixel map from the table */ vdebayer->hw.sink_pix_map = vimc_debayer_pix_map_by_code(sink_fmt->code); vdebayer->hw.size.width = sink_fmt->width; vdebayer->hw.size.height = sink_fmt->height; vdebayer->hw.src_code = src_fmt->code; v4l2_subdev_unlock_state(state); /* * Allocate the frame buffer. 
Use vmalloc to be able to * allocate a large amount of memory */ vdebayer->src_frame = vmalloc(frame_size); if (!vdebayer->src_frame) return -ENOMEM; } else { if (!vdebayer->src_frame) return 0; vfree(vdebayer->src_frame); vdebayer->src_frame = NULL; } return 0; } static const struct v4l2_subdev_core_ops vimc_debayer_core_ops = { .log_status = v4l2_ctrl_subdev_log_status, .subscribe_event = v4l2_ctrl_subdev_subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static const struct v4l2_subdev_video_ops vimc_debayer_video_ops = { .s_stream = vimc_debayer_s_stream, }; static const struct v4l2_subdev_ops vimc_debayer_ops = { .core = &vimc_debayer_core_ops, .pad = &vimc_debayer_pad_ops, .video = &vimc_debayer_video_ops, }; static const struct v4l2_subdev_internal_ops vimc_debayer_internal_ops = { .init_state = vimc_debayer_init_state, }; static unsigned int vimc_debayer_get_val(const u8 *bytes, const unsigned int n_bytes) { unsigned int i; unsigned int acc = 0; for (i = 0; i < n_bytes; i++) acc = acc + (bytes[i] << (8 * i)); return acc; } static void vimc_debayer_calc_rgb_sink(struct vimc_debayer_device *vdebayer, const u8 *frame, const unsigned int lin, const unsigned int col, unsigned int rgb[3]) { unsigned int i, seek, wlin, wcol; unsigned int n_rgb[3] = {0, 0, 0}; for (i = 0; i < 3; i++) rgb[i] = 0; /* * Calculate how many we need to subtract to get to the pixel in * the top left corner of the mean window (considering the current * pixel as the center) */ seek = vdebayer->hw.mean_win_size / 2; /* Sum the values of the colors in the mean window */ dev_dbg(vdebayer->ved.dev, "deb: %s: --- Calc pixel %dx%d, window mean %d, seek %d ---\n", vdebayer->sd.name, lin, col, vdebayer->hw.size.height, seek); /* * Iterate through all the lines in the mean window, start * with zero if the pixel is outside the frame and don't pass * the height when the pixel is in the bottom border of the * frame */ for (wlin = seek > lin ? 0 : lin - seek; wlin < lin + seek + 1 && wlin < vdebayer->hw.size.height; wlin++) { /* * Iterate through all the columns in the mean window, start * with zero if the pixel is outside the frame and don't pass * the width when the pixel is in the right border of the * frame */ for (wcol = seek > col ? 
0 : col - seek; wcol < col + seek + 1 && wcol < vdebayer->hw.size.width; wcol++) { enum vimc_debayer_rgb_colors color; unsigned int index; /* Check which color this pixel is */ color = vdebayer->hw.sink_pix_map->order[wlin % 2][wcol % 2]; index = VIMC_FRAME_INDEX(wlin, wcol, vdebayer->hw.size.width, vdebayer->hw.sink_bpp); dev_dbg(vdebayer->ved.dev, "deb: %s: RGB CALC: frame index %d, win pos %dx%d, color %d\n", vdebayer->sd.name, index, wlin, wcol, color); /* Get its value */ rgb[color] = rgb[color] + vimc_debayer_get_val(&frame[index], vdebayer->hw.sink_bpp); /* Save how many values we already added */ n_rgb[color]++; dev_dbg(vdebayer->ved.dev, "deb: %s: RGB CALC: val %d, n %d\n", vdebayer->sd.name, rgb[color], n_rgb[color]); } } /* Calculate the mean */ for (i = 0; i < 3; i++) { dev_dbg(vdebayer->ved.dev, "deb: %s: PRE CALC: %dx%d Color %d, val %d, n %d\n", vdebayer->sd.name, lin, col, i, rgb[i], n_rgb[i]); if (n_rgb[i]) rgb[i] = rgb[i] / n_rgb[i]; dev_dbg(vdebayer->ved.dev, "deb: %s: FINAL CALC: %dx%d Color %d, val %d\n", vdebayer->sd.name, lin, col, i, rgb[i]); } } static void *vimc_debayer_process_frame(struct vimc_ent_device *ved, const void *sink_frame) { struct vimc_debayer_device *vdebayer = container_of(ved, struct vimc_debayer_device, ved); unsigned int rgb[3]; unsigned int i, j; /* If the stream in this node is not active, just return */ if (!vdebayer->src_frame) return ERR_PTR(-EINVAL); for (i = 0; i < vdebayer->hw.size.height; i++) for (j = 0; j < vdebayer->hw.size.width; j++) { vimc_debayer_calc_rgb_sink(vdebayer, sink_frame, i, j, rgb); vdebayer->set_rgb_src(vdebayer, i, j, rgb); } return vdebayer->src_frame; } static int vimc_debayer_s_ctrl(struct v4l2_ctrl *ctrl) { struct vimc_debayer_device *vdebayer = container_of(ctrl->handler, struct vimc_debayer_device, hdl); switch (ctrl->id) { case VIMC_CID_MEAN_WIN_SIZE: vdebayer->hw.mean_win_size = ctrl->val; break; default: return -EINVAL; } return 0; } static const struct v4l2_ctrl_ops vimc_debayer_ctrl_ops = { .s_ctrl = vimc_debayer_s_ctrl, }; static void vimc_debayer_release(struct vimc_ent_device *ved) { struct vimc_debayer_device *vdebayer = container_of(ved, struct vimc_debayer_device, ved); v4l2_ctrl_handler_free(&vdebayer->hdl); v4l2_subdev_cleanup(&vdebayer->sd); media_entity_cleanup(vdebayer->ved.ent); kfree(vdebayer); } static const struct v4l2_ctrl_config vimc_debayer_ctrl_class = { .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY, .id = VIMC_CID_VIMC_CLASS, .name = "VIMC Controls", .type = V4L2_CTRL_TYPE_CTRL_CLASS, }; static const struct v4l2_ctrl_config vimc_debayer_ctrl_mean_win_size = { .ops = &vimc_debayer_ctrl_ops, .id = VIMC_CID_MEAN_WIN_SIZE, .name = "Debayer Mean Window Size", .type = V4L2_CTRL_TYPE_INTEGER, .min = 1, .max = 25, .step = 2, .def = 3, }; static struct vimc_ent_device *vimc_debayer_add(struct vimc_device *vimc, const char *vcfg_name) { struct v4l2_device *v4l2_dev = &vimc->v4l2_dev; struct vimc_debayer_device *vdebayer; int ret; /* Allocate the vdebayer struct */ vdebayer = kzalloc(sizeof(*vdebayer), GFP_KERNEL); if (!vdebayer) return ERR_PTR(-ENOMEM); /* Create controls: */ v4l2_ctrl_handler_init(&vdebayer->hdl, 2); v4l2_ctrl_new_custom(&vdebayer->hdl, &vimc_debayer_ctrl_class, NULL); v4l2_ctrl_new_custom(&vdebayer->hdl, &vimc_debayer_ctrl_mean_win_size, NULL); vdebayer->sd.ctrl_handler = &vdebayer->hdl; if (vdebayer->hdl.error) { ret = vdebayer->hdl.error; goto err_free_vdebayer; } /* Initialize ved and sd */ vdebayer->pads[0].flags = MEDIA_PAD_FL_SINK; vdebayer->pads[1].flags 
= MEDIA_PAD_FL_SOURCE; ret = vimc_ent_sd_register(&vdebayer->ved, &vdebayer->sd, v4l2_dev, vcfg_name, MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2, vdebayer->pads, &vimc_debayer_internal_ops, &vimc_debayer_ops); if (ret) goto err_free_hdl; vdebayer->ved.process_frame = vimc_debayer_process_frame; vdebayer->ved.dev = vimc->mdev.dev; vdebayer->hw.mean_win_size = vimc_debayer_ctrl_mean_win_size.def; vdebayer->set_rgb_src = vimc_debayer_process_rgb_frame; return &vdebayer->ved; err_free_hdl: v4l2_ctrl_handler_free(&vdebayer->hdl); err_free_vdebayer: kfree(vdebayer); return ERR_PTR(ret); } const struct vimc_ent_type vimc_debayer_type = { .add = vimc_debayer_add, .release = vimc_debayer_release };
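/*
 * Editor's illustration (not part of vimc-debayer.c): a standalone sketch of
 * the mean-window clamping performed by vimc_debayer_calc_rgb_sink() above.
 * For a window of mean_win_size pixels centred on (lin, col), the iteration
 * starts at zero when the window would fall off the top/left edge and stops
 * at the frame border on the bottom/right. Names and values are hypothetical,
 * and a single-channel frame is used instead of a Bayer pattern.
 */
#include <stdio.h>

static unsigned int example_window_mean(const unsigned char *frame,
					unsigned int width, unsigned int height,
					unsigned int lin, unsigned int col,
					unsigned int mean_win_size)
{
	unsigned int seek = mean_win_size / 2;
	unsigned int sum = 0, n = 0, wlin, wcol;

	for (wlin = seek > lin ? 0 : lin - seek;
	     wlin < lin + seek + 1 && wlin < height; wlin++)
		for (wcol = seek > col ? 0 : col - seek;
		     wcol < col + seek + 1 && wcol < width; wcol++) {
			sum += frame[wlin * width + wcol];
			n++;
		}

	return n ? sum / n : 0;
}

int main(void)
{
	unsigned char frame[4 * 4] = {
		 10,  20,  30,  40,
		 50,  60,  70,  80,
		 90, 100, 110, 120,
		130, 140, 150, 160,
	};

	/* A 3x3 window centred on the top-left corner only covers 4 pixels. */
	printf("%u\n", example_window_mean(frame, 4, 4, 0, 0, 3));
	return 0;
}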
/*
 * Mapping of UID/GIDs to name and vice versa.
 *
 * Copyright (c) 2002, 2003 The Regents of the University of
 * Michigan. All rights reserved.
 *
 * Marius Aamodt Eriksen <marius@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/sunrpc/svc_xprt.h> #include <net/net_namespace.h> #include "idmap.h" #include "nfsd.h" #include "netns.h" #include "vfs.h" /* * Turn off idmapping when using AUTH_SYS. */ static bool nfs4_disable_idmapping = true; module_param(nfs4_disable_idmapping, bool, 0644); MODULE_PARM_DESC(nfs4_disable_idmapping, "Turn off server's NFSv4 idmapping when using 'sec=sys'"); /* * Cache entry */ /* * XXX we know that IDMAP_NAMESZ < PAGE_SIZE, but it's ugly to rely on * that. */ struct ent { struct cache_head h; int type; /* User / Group */ u32 id; char name[IDMAP_NAMESZ]; char authname[IDMAP_NAMESZ]; struct rcu_head rcu_head; }; /* Common entry handling */ #define ENT_HASHBITS 8 #define ENT_HASHMAX (1 << ENT_HASHBITS) static void ent_init(struct cache_head *cnew, struct cache_head *citm) { struct ent *new = container_of(cnew, struct ent, h); struct ent *itm = container_of(citm, struct ent, h); new->id = itm->id; new->type = itm->type; strscpy(new->name, itm->name, sizeof(new->name)); strscpy(new->authname, itm->authname, sizeof(new->authname)); } static void ent_put(struct kref *ref) { struct ent *map = container_of(ref, struct ent, h.ref); kfree_rcu(map, rcu_head); } static struct cache_head * ent_alloc(void) { struct ent *e = kmalloc(sizeof(*e), GFP_KERNEL); if (e) return &e->h; else return NULL; } /* * ID -> Name cache */ static uint32_t idtoname_hash(struct ent *ent) { uint32_t hash; hash = hash_str(ent->authname, ENT_HASHBITS); hash = hash_long(hash ^ ent->id, ENT_HASHBITS); /* Flip LSB for user/group */ if (ent->type == IDMAP_TYPE_GROUP) hash ^= 1; return hash; } static int idtoname_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall_timeout(cd, h); } static void idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) { struct ent *ent = container_of(ch, struct ent, h); char idstr[11]; qword_add(bpp, blen, ent->authname); snprintf(idstr, sizeof(idstr), "%u", ent->id); qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user"); qword_add(bpp, blen, idstr); (*bpp)[-1] = '\n'; } static int idtoname_match(struct cache_head *ca, struct cache_head *cb) { struct ent *a = container_of(ca, struct ent, h); struct ent *b = container_of(cb, struct ent, h); return (a->id == b->id && a->type == b->type && strcmp(a->authname, b->authname) == 0); } static int idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct ent *ent; if (h == NULL) { seq_puts(m, "#domain type id [name]\n"); return 0; } ent = container_of(h, struct ent, h); seq_printf(m, "%s %s %u", ent->authname, ent->type == IDMAP_TYPE_GROUP ? 
"group" : "user", ent->id); if (test_bit(CACHE_VALID, &h->flags)) seq_printf(m, " %s", ent->name); seq_putc(m, '\n'); return 0; } static void warn_no_idmapd(struct cache_detail *detail, int has_died) { printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n", has_died ? "died" : "not been started"); } static int idtoname_parse(struct cache_detail *, char *, int); static struct ent *idtoname_lookup(struct cache_detail *, struct ent *); static struct ent *idtoname_update(struct cache_detail *, struct ent *, struct ent *); static const struct cache_detail idtoname_cache_template = { .owner = THIS_MODULE, .hash_size = ENT_HASHMAX, .name = "nfs4.idtoname", .cache_put = ent_put, .cache_upcall = idtoname_upcall, .cache_request = idtoname_request, .cache_parse = idtoname_parse, .cache_show = idtoname_show, .warn_no_listener = warn_no_idmapd, .match = idtoname_match, .init = ent_init, .update = ent_init, .alloc = ent_alloc, }; static int idtoname_parse(struct cache_detail *cd, char *buf, int buflen) { struct ent ent, *res; char *buf1, *bp; int len; int error = -EINVAL; if (buf[buflen - 1] != '\n') return (-EINVAL); buf[buflen - 1]= '\0'; buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL); if (buf1 == NULL) return (-ENOMEM); memset(&ent, 0, sizeof(ent)); /* Authentication name */ len = qword_get(&buf, buf1, PAGE_SIZE); if (len <= 0 || len >= IDMAP_NAMESZ) goto out; memcpy(ent.authname, buf1, sizeof(ent.authname)); /* Type */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.type = strcmp(buf1, "user") == 0 ? IDMAP_TYPE_USER : IDMAP_TYPE_GROUP; /* ID */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.id = simple_strtoul(buf1, &bp, 10); if (bp == buf1) goto out; /* expiry */ error = get_expiry(&buf, &ent.h.expiry_time); if (error) goto out; error = -ENOMEM; res = idtoname_lookup(cd, &ent); if (!res) goto out; /* Name */ error = -EINVAL; len = qword_get(&buf, buf1, PAGE_SIZE); if (len < 0 || len >= IDMAP_NAMESZ) goto out; if (len == 0) set_bit(CACHE_NEGATIVE, &ent.h.flags); else memcpy(ent.name, buf1, sizeof(ent.name)); error = -ENOMEM; res = idtoname_update(cd, &ent, res); if (res == NULL) goto out; cache_put(&res->h, cd); error = 0; out: kfree(buf1); return error; } static struct ent * idtoname_lookup(struct cache_detail *cd, struct ent *item) { struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h, idtoname_hash(item)); if (ch) return container_of(ch, struct ent, h); else return NULL; } static struct ent * idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old) { struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h, idtoname_hash(new)); if (ch) return container_of(ch, struct ent, h); else return NULL; } /* * Name -> ID cache */ static inline int nametoid_hash(struct ent *ent) { return hash_str(ent->name, ENT_HASHBITS); } static int nametoid_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall_timeout(cd, h); } static void nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) { struct ent *ent = container_of(ch, struct ent, h); qword_add(bpp, blen, ent->authname); qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? 
"group" : "user"); qword_add(bpp, blen, ent->name); (*bpp)[-1] = '\n'; } static int nametoid_match(struct cache_head *ca, struct cache_head *cb) { struct ent *a = container_of(ca, struct ent, h); struct ent *b = container_of(cb, struct ent, h); return (a->type == b->type && strcmp(a->name, b->name) == 0 && strcmp(a->authname, b->authname) == 0); } static int nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct ent *ent; if (h == NULL) { seq_puts(m, "#domain type name [id]\n"); return 0; } ent = container_of(h, struct ent, h); seq_printf(m, "%s %s %s", ent->authname, ent->type == IDMAP_TYPE_GROUP ? "group" : "user", ent->name); if (test_bit(CACHE_VALID, &h->flags)) seq_printf(m, " %u", ent->id); seq_putc(m, '\n'); return 0; } static struct ent *nametoid_lookup(struct cache_detail *, struct ent *); static struct ent *nametoid_update(struct cache_detail *, struct ent *, struct ent *); static int nametoid_parse(struct cache_detail *, char *, int); static const struct cache_detail nametoid_cache_template = { .owner = THIS_MODULE, .hash_size = ENT_HASHMAX, .name = "nfs4.nametoid", .cache_put = ent_put, .cache_upcall = nametoid_upcall, .cache_request = nametoid_request, .cache_parse = nametoid_parse, .cache_show = nametoid_show, .warn_no_listener = warn_no_idmapd, .match = nametoid_match, .init = ent_init, .update = ent_init, .alloc = ent_alloc, }; static int nametoid_parse(struct cache_detail *cd, char *buf, int buflen) { struct ent ent, *res; char *buf1; int len, error = -EINVAL; if (buf[buflen - 1] != '\n') return (-EINVAL); buf[buflen - 1]= '\0'; buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL); if (buf1 == NULL) return (-ENOMEM); memset(&ent, 0, sizeof(ent)); /* Authentication name */ len = qword_get(&buf, buf1, PAGE_SIZE); if (len <= 0 || len >= IDMAP_NAMESZ) goto out; memcpy(ent.authname, buf1, sizeof(ent.authname)); /* Type */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.type = strcmp(buf1, "user") == 0 ? 
IDMAP_TYPE_USER : IDMAP_TYPE_GROUP; /* Name */ len = qword_get(&buf, buf1, PAGE_SIZE); if (len <= 0 || len >= IDMAP_NAMESZ) goto out; memcpy(ent.name, buf1, sizeof(ent.name)); /* expiry */ error = get_expiry(&buf, &ent.h.expiry_time); if (error) goto out; /* ID */ error = get_int(&buf, &ent.id); if (error == -EINVAL) goto out; if (error == -ENOENT) set_bit(CACHE_NEGATIVE, &ent.h.flags); error = -ENOMEM; res = nametoid_lookup(cd, &ent); if (res == NULL) goto out; res = nametoid_update(cd, &ent, res); if (res == NULL) goto out; cache_put(&res->h, cd); error = 0; out: kfree(buf1); return (error); } static struct ent * nametoid_lookup(struct cache_detail *cd, struct ent *item) { struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h, nametoid_hash(item)); if (ch) return container_of(ch, struct ent, h); else return NULL; } static struct ent * nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old) { struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h, nametoid_hash(new)); if (ch) return container_of(ch, struct ent, h); else return NULL; } /* * Exported API */ int nfsd_idmap_init(struct net *net) { int rv; struct nfsd_net *nn = net_generic(net, nfsd_net_id); nn->idtoname_cache = cache_create_net(&idtoname_cache_template, net); if (IS_ERR(nn->idtoname_cache)) return PTR_ERR(nn->idtoname_cache); rv = cache_register_net(nn->idtoname_cache, net); if (rv) goto destroy_idtoname_cache; nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net); if (IS_ERR(nn->nametoid_cache)) { rv = PTR_ERR(nn->nametoid_cache); goto unregister_idtoname_cache; } rv = cache_register_net(nn->nametoid_cache, net); if (rv) goto destroy_nametoid_cache; return 0; destroy_nametoid_cache: cache_destroy_net(nn->nametoid_cache, net); unregister_idtoname_cache: cache_unregister_net(nn->idtoname_cache, net); destroy_idtoname_cache: cache_destroy_net(nn->idtoname_cache, net); return rv; } void nfsd_idmap_shutdown(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); cache_unregister_net(nn->idtoname_cache, net); cache_unregister_net(nn->nametoid_cache, net); cache_destroy_net(nn->idtoname_cache, net); cache_destroy_net(nn->nametoid_cache, net); } static int idmap_lookup(struct svc_rqst *rqstp, struct ent *(*lookup_fn)(struct cache_detail *, struct ent *), struct ent *key, struct cache_detail *detail, struct ent **item) { int ret; *item = lookup_fn(detail, key); if (!*item) return -ENOMEM; retry: ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle); if (ret == -ETIMEDOUT) { struct ent *prev_item = *item; *item = lookup_fn(detail, key); if (*item != prev_item) goto retry; cache_put(&(*item)->h, detail); } return ret; } static char * rqst_authname(struct svc_rqst *rqstp) { struct auth_domain *clp; clp = rqstp->rq_gssclient ? 
rqstp->rq_gssclient : rqstp->rq_client; return clp->name; } static __be32 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id) { struct ent *item, key = { .type = type, }; int ret; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if (namelen + 1 > sizeof(key.name)) return nfserr_badowner; memcpy(key.name, name, namelen); key.name[namelen] = '\0'; strscpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item); if (ret == -ENOENT) return nfserr_badowner; if (ret) return nfserrno(ret); *id = item->id; cache_put(&item->h, nn->nametoid_cache); return 0; } static __be32 encode_ascii_id(struct xdr_stream *xdr, u32 id) { char buf[11]; int len; __be32 *p; len = sprintf(buf, "%u", id); p = xdr_reserve_space(xdr, len + 4); if (!p) return nfserr_resource; p = xdr_encode_opaque(p, buf, len); return 0; } static __be32 idmap_id_to_name(struct xdr_stream *xdr, struct svc_rqst *rqstp, int type, u32 id) { struct ent *item, key = { .id = id, .type = type, }; __be32 *p; int ret; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); strscpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item); if (ret == -ENOENT) return encode_ascii_id(xdr, id); if (ret) return nfserrno(ret); ret = strlen(item->name); WARN_ON_ONCE(ret > IDMAP_NAMESZ); p = xdr_reserve_space(xdr, ret + 4); if (!p) return nfserr_resource; p = xdr_encode_opaque(p, item->name, ret); cache_put(&item->h, nn->idtoname_cache); return 0; } static bool numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id) { int ret; char buf[11]; if (namelen + 1 > sizeof(buf)) /* too long to represent a 32-bit id: */ return false; /* Just to make sure it's null-terminated: */ memcpy(buf, name, namelen); buf[namelen] = '\0'; ret = kstrtouint(buf, 10, id); return ret == 0; } static __be32 do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id) { if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS) if (numeric_name_to_id(rqstp, type, name, namelen, id)) return 0; /* * otherwise, fall through and try idmapping, for * backwards compatibility with clients sending names: */ return idmap_name_to_id(rqstp, type, name, namelen, id); } static __be32 encode_name_from_id(struct xdr_stream *xdr, struct svc_rqst *rqstp, int type, u32 id) { if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS) return encode_ascii_id(xdr, id); return idmap_id_to_name(xdr, rqstp, type, id); } __be32 nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen, kuid_t *uid) { __be32 status; u32 id = -1; if (name == NULL || namelen == 0) return nfserr_inval; status = do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, &id); *uid = make_kuid(nfsd_user_namespace(rqstp), id); if (!uid_valid(*uid)) status = nfserr_badowner; return status; } __be32 nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen, kgid_t *gid) { __be32 status; u32 id = -1; if (name == NULL || namelen == 0) return nfserr_inval; status = do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, &id); *gid = make_kgid(nfsd_user_namespace(rqstp), id); if (!gid_valid(*gid)) status = nfserr_badowner; return status; } __be32 nfsd4_encode_user(struct xdr_stream *xdr, struct svc_rqst *rqstp, kuid_t uid) { u32 id = from_kuid_munged(nfsd_user_namespace(rqstp), uid); return 
encode_name_from_id(xdr, rqstp, IDMAP_TYPE_USER, id); } __be32 nfsd4_encode_group(struct xdr_stream *xdr, struct svc_rqst *rqstp, kgid_t gid) { u32 id = from_kgid_munged(nfsd_user_namespace(rqstp), gid); return encode_name_from_id(xdr, rqstp, IDMAP_TYPE_GROUP, id); }
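/*
 * Editor's illustration (not part of nfs4idmap.c): the two caches above are
 * populated by the userspace idmapd daemon writing space-separated,
 * newline-terminated records into the nfs4.idtoname and nfs4.nametoid
 * channels. Going by idtoname_parse() and nametoid_parse() above, the
 * accepted field order appears to be "authname type id expiry [name]" and
 * "authname type name expiry [id]" respectively. The standalone sketch below
 * only formats two such records; the domain, user name, id and expiry value
 * are made up.
 */
#include <stdio.h>

int main(void)
{
	char idtoname[128], nametoid[128];
	unsigned long expiry = 1700000000;	/* hypothetical absolute expiry, seconds */

	/* nfs4.idtoname downcall: authname, "user"/"group", numeric id, expiry, name */
	snprintf(idtoname, sizeof(idtoname), "%s %s %u %lu %s\n",
		 "localdomain", "user", 1000u, expiry, "fred");
	/* nfs4.nametoid downcall: authname, "user"/"group", name, expiry, id */
	snprintf(nametoid, sizeof(nametoid), "%s %s %s %lu %u\n",
		 "localdomain", "user", "fred", expiry, 1000u);

	fputs(idtoname, stdout);
	fputs(nametoid, stdout);
	return 0;
}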
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_ZONED_H
#define BTRFS_ZONED_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/blkzoned.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include "messages.h"
#include "volumes.h"
#include "disk-io.h"
#include "block-group.h"
#include "btrfs_inode.h"
#include "fs.h"

struct block_device;
struct extent_buffer;
struct btrfs_bio;
struct btrfs_ordered_extent;
struct btrfs_fs_info;
struct btrfs_space_info;
struct btrfs_eb_write_context;
struct btrfs_fs_devices;

#define BTRFS_DEFAULT_RECLAIM_THRESH (75)

struct btrfs_zoned_device_info {
	/*
	 * Number of zones, zone size and types of zones if bdev is a
	 * zoned block device.
	 */
	u64 zone_size;
	u8 zone_size_shift;
	u32 nr_zones;
	unsigned int max_active_zones;
	/*
	 * Reserved active zones for one metadata and one system block group.
	 * It can vary per-device depending on the allocation status.
*/ int reserved_active_zones; atomic_t active_zones_left; unsigned long *seq_zones; unsigned long *empty_zones; unsigned long *active_zones; struct blk_zone *zone_cache; struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX]; }; void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered); #ifdef CONFIG_BLK_DEV_ZONED int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info); int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache); void btrfs_destroy_dev_zone_info(struct btrfs_device *device); struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev); int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info); int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info, unsigned long long *mount_opt); int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw, u64 *bytenr_ret); int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw, u64 *bytenr_ret); int btrfs_advance_sb_log(struct btrfs_device *device, int mirror); int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror); u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start, u64 hole_end, u64 num_bytes); int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical, u64 length, u64 *bytes); int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size); int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new); void btrfs_calc_zone_unusable(struct btrfs_block_group *cache); bool btrfs_use_zone_append(struct btrfs_bio *bbio); void btrfs_record_physical_zoned(struct btrfs_bio *bbio); int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info, struct btrfs_eb_write_context *ctx); int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length); int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical, u64 physical_start, u64 physical_pos); bool btrfs_zone_activate(struct btrfs_block_group *block_group); int btrfs_zone_finish(struct btrfs_block_group *block_group); bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags); void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length); void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg, struct extent_buffer *eb); void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg); void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info); bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info); void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical, u64 length); int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info); int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, bool do_finish); void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info); #else /* CONFIG_BLK_DEV_ZONED */ static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info) { return 0; } static inline int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) { return 0; } static inline void btrfs_destroy_dev_zone_info(struct btrfs_device *device) { } /* * In case the kernel is compiled without CONFIG_BLK_DEV_ZONED we'll never call * into btrfs_clone_dev_zone_info() so it's safe to return NULL here. 
*/ static inline struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info( struct btrfs_device *orig_dev) { return NULL; } static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info) { if (!btrfs_is_zoned(fs_info)) return 0; btrfs_err(fs_info, "zoned block devices support is not enabled"); return -EOPNOTSUPP; } static inline int btrfs_check_mountopts_zoned(const struct btrfs_fs_info *info, unsigned long long *mount_opt) { return 0; } static inline int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw, u64 *bytenr_ret) { *bytenr_ret = btrfs_sb_offset(mirror); return 0; } static inline int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw, u64 *bytenr_ret) { *bytenr_ret = btrfs_sb_offset(mirror); return 0; } static inline int btrfs_advance_sb_log(struct btrfs_device *device, int mirror) { return 0; } static inline int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror) { return 0; } static inline u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start, u64 hole_end, u64 num_bytes) { return hole_start; } static inline int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical, u64 length, u64 *bytes) { *bytes = 0; return 0; } static inline int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size) { return 0; } static inline int btrfs_load_block_group_zone_info( struct btrfs_block_group *cache, bool new) { return 0; } static inline void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) { } static inline bool btrfs_use_zone_append(struct btrfs_bio *bbio) { return false; } static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio) { } static inline int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info, struct btrfs_eb_write_context *ctx) { return 0; } static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length) { return -EOPNOTSUPP; } static inline int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical, u64 physical_start, u64 physical_pos) { return -EOPNOTSUPP; } static inline bool btrfs_zone_activate(struct btrfs_block_group *block_group) { return true; } static inline int btrfs_zone_finish(struct btrfs_block_group *block_group) { return 0; } static inline bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags) { return true; } static inline void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length) { } static inline void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg, struct extent_buffer *eb) { } static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { } static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { } static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) { return false; } static inline void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical, u64 length) { } static inline int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info) { return 1; } static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, bool do_finish) { /* Consider all the block groups are active */ return 0; } static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { } #endif static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos) { struct btrfs_zoned_device_info *zone_info = device->zone_info; if (!zone_info) return false; return test_bit(pos >> 
zone_info->zone_size_shift, zone_info->seq_zones); } static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos) { struct btrfs_zoned_device_info *zone_info = device->zone_info; if (!zone_info) return true; return test_bit(pos >> zone_info->zone_size_shift, zone_info->empty_zones); } static inline void btrfs_dev_set_empty_zone_bit(struct btrfs_device *device, u64 pos, bool set) { struct btrfs_zoned_device_info *zone_info = device->zone_info; unsigned int zno; if (!zone_info) return; zno = pos >> zone_info->zone_size_shift; if (set) set_bit(zno, zone_info->empty_zones); else clear_bit(zno, zone_info->empty_zones); } static inline void btrfs_dev_set_zone_empty(struct btrfs_device *device, u64 pos) { btrfs_dev_set_empty_zone_bit(device, pos, true); } static inline void btrfs_dev_clear_zone_empty(struct btrfs_device *device, u64 pos) { btrfs_dev_set_empty_zone_bit(device, pos, false); } static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_info, struct block_device *bdev) { if (btrfs_is_zoned(fs_info)) { /* * We can allow a regular device on a zoned filesystem, because * we will emulate the zoned capabilities. */ if (!bdev_is_zoned(bdev)) return true; return fs_info->zone_size == (bdev_zone_sectors(bdev) << SECTOR_SHIFT); } /* Do not allow Host Managed zoned device. */ return !bdev_is_zoned(bdev); } static inline bool btrfs_check_super_location(struct btrfs_device *device, u64 pos) { /* * On a non-zoned device, any address is OK. On a zoned device, * non-SEQUENTIAL WRITE REQUIRED zones are capable. */ return device->zone_info == NULL || !btrfs_dev_is_sequential(device, pos); } static inline bool btrfs_can_zone_reset(struct btrfs_device *device, u64 physical, u64 length) { u64 zone_size; if (!btrfs_dev_is_sequential(device, physical)) return false; zone_size = device->zone_info->zone_size; if (!IS_ALIGNED(physical, zone_size) || !IS_ALIGNED(length, zone_size)) return false; return true; } static inline void btrfs_zoned_meta_io_lock(struct btrfs_fs_info *fs_info) { if (!btrfs_is_zoned(fs_info)) return; mutex_lock(&fs_info->zoned_meta_io_lock); } static inline void btrfs_zoned_meta_io_unlock(struct btrfs_fs_info *fs_info) { if (!btrfs_is_zoned(fs_info)) return; mutex_unlock(&fs_info->zoned_meta_io_lock); } static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg) { struct btrfs_fs_info *fs_info = bg->fs_info; if (!btrfs_is_zoned(fs_info)) return; spin_lock(&fs_info->treelog_bg_lock); if (fs_info->treelog_bg == bg->start) fs_info->treelog_bg = 0; spin_unlock(&fs_info->treelog_bg_lock); } static inline void btrfs_zoned_data_reloc_lock(struct btrfs_inode *inode) { struct btrfs_root *root = inode->root; if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info)) mutex_lock(&root->fs_info->zoned_data_reloc_io_lock); } static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode) { struct btrfs_root *root = inode->root; if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info)) mutex_unlock(&root->fs_info->zoned_data_reloc_io_lock); } static inline bool btrfs_zoned_bg_is_full(const struct btrfs_block_group *bg) { ASSERT(btrfs_is_zoned(bg->fs_info)); return (bg->alloc_offset == bg->zone_capacity); } #endif
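/*
 * Editor's illustration (not part of zoned.h): a standalone sketch of the
 * zone-number calculation used by btrfs_dev_is_sequential() and
 * btrfs_dev_is_empty_zone() above. A device byte offset is turned into a
 * zone index by shifting it right by zone_size_shift, and that index selects
 * a bit in the per-device seq_zones/empty_zones bitmaps. The shift value and
 * helper names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_ZONE_SIZE_SHIFT 28	/* hypothetical 256 MiB zones */

static bool example_zone_bit(const uint64_t *bitmap, uint64_t pos)
{
	uint64_t zno = pos >> EXAMPLE_ZONE_SIZE_SHIFT;

	return bitmap[zno / 64] & (1ULL << (zno % 64));
}

int main(void)
{
	uint64_t seq_zones[2] = { 0 };

	/* Mark zone 3 as sequential-write-required. */
	seq_zones[0] |= 1ULL << 3;

	/* Query an offset inside zone 3 (3 * 256 MiB + 4 KiB): prints 1. */
	printf("%d\n", example_zone_bit(seq_zones, (3ULL << 28) + 4096));
	return 0;
}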
63 99 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2008 Oracle. All rights reserved. */ #ifndef BTRFS_LOCKING_H #define BTRFS_LOCKING_H #include <linux/atomic.h> #include <linux/wait.h> #include <linux/lockdep.h> #include <linux/percpu_counter.h> #include "extent_io.h" struct extent_buffer; struct btrfs_path; struct btrfs_root; #define BTRFS_WRITE_LOCK 1 #define BTRFS_READ_LOCK 2 /* * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at * the time of this patch is 8, which is how many we use. Keep this in mind if * you decide you want to add another subclass. */ enum btrfs_lock_nesting { BTRFS_NESTING_NORMAL, /* * When we COW a block we are holding the lock on the original block, * and since our lockdep maps are rootid+level, this confuses lockdep * when we lock the newly allocated COW'd block. Handle this by having * a subclass for COW'ed blocks so that lockdep doesn't complain. */ BTRFS_NESTING_COW, /* * Oftentimes we need to lock adjacent nodes on the same level while * still holding the lock on the original node we searched to, such as * for searching forward or for split/balance. * * Because of this we need to indicate to lockdep that this is * acceptable by having a different subclass for each of these * operations. */ BTRFS_NESTING_LEFT, BTRFS_NESTING_RIGHT, /* * When splitting we will be holding a lock on the left/right node when * we need to cow that node, thus we need a new set of subclasses for * these two operations. */ BTRFS_NESTING_LEFT_COW, BTRFS_NESTING_RIGHT_COW, /* * When splitting we may push nodes to the left or right, but still use * the subsequent nodes in our path, keeping our locks on those adjacent * blocks. Thus when we go to allocate a new split block we've already * used up all of our available subclasses, so this subclass exists to * handle this case where we need to allocate a new split block. */ BTRFS_NESTING_SPLIT, /* * When promoting a new block to a root we need to have a special * subclass so we don't confuse lockdep, as it will appear that we are * locking a higher level node before a lower level one. Copying also * has this problem as it appears we're locking the same block again * when we make a snapshot of an existing root. */ BTRFS_NESTING_NEW_ROOT, /* * We are limited to MAX_LOCKDEP_SUBLCLASSES number of subclasses, so * add this in here and add a static_assert to keep us from going over * the limit. As of this writing we're limited to 8, and we're * definitely using 8, hence this check to keep us from messing up in * the future. 
*/ BTRFS_NESTING_MAX, }; enum btrfs_lockdep_trans_states { BTRFS_LOCKDEP_TRANS_COMMIT_PREP, BTRFS_LOCKDEP_TRANS_UNBLOCKED, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED, BTRFS_LOCKDEP_TRANS_COMPLETED, }; /* * Lockdep annotation for wait events. * * @owner: The struct where the lockdep map is defined * @lock: The lockdep map corresponding to a wait event * * This macro is used to annotate a wait event. In this case a thread acquires * the lockdep map as writer (exclusive lock) because it has to block until all * the threads that hold the lock as readers signal the condition for the wait * event and release their locks. */ #define btrfs_might_wait_for_event(owner, lock) \ do { \ rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_); \ rwsem_release(&owner->lock##_map, _THIS_IP_); \ } while (0) /* * Protection for the resource/condition of a wait event. * * @owner: The struct where the lockdep map is defined * @lock: The lockdep map corresponding to a wait event * * Many threads can modify the condition for the wait event at the same time * and signal the threads that block on the wait event. The threads that modify * the condition and do the signaling acquire the lock as readers (shared * lock). */ #define btrfs_lockdep_acquire(owner, lock) \ rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_) /* * Used after signaling the condition for a wait event to release the lockdep * map held by a reader thread. */ #define btrfs_lockdep_release(owner, lock) \ rwsem_release(&owner->lock##_map, _THIS_IP_) /* * Macros for the transaction states wait events, similar to the generic wait * event macros. */ #define btrfs_might_wait_for_state(owner, i) \ do { \ rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \ rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_); \ } while (0) #define btrfs_trans_state_lockdep_acquire(owner, i) \ rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_) #define btrfs_trans_state_lockdep_release(owner, i) \ rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_) /* Initialization of the lockdep map */ #define btrfs_lockdep_init_map(owner, lock) \ do { \ static struct lock_class_key lock##_key; \ lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0); \ } while (0) /* Initialization of the transaction states lockdep maps. 
*/ #define btrfs_state_lockdep_init_map(owner, lock, state) \ do { \ static struct lock_class_key lock##_key; \ lockdep_init_map(&owner->btrfs_state_change_map[state], #lock, \ &lock##_key, 0); \ } while (0) static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES, "too many lock subclasses defined"); void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest); static inline void btrfs_tree_lock(struct extent_buffer *eb) { btrfs_tree_lock_nested(eb, BTRFS_NESTING_NORMAL); } void btrfs_tree_unlock(struct extent_buffer *eb); void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest); static inline void btrfs_tree_read_lock(struct extent_buffer *eb) { btrfs_tree_read_lock_nested(eb, BTRFS_NESTING_NORMAL); } void btrfs_tree_read_unlock(struct extent_buffer *eb); int btrfs_try_tree_read_lock(struct extent_buffer *eb); int btrfs_try_tree_write_lock(struct extent_buffer *eb); struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root); struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root); #ifdef CONFIG_BTRFS_DEBUG static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { lockdep_assert_held_write(&eb->lock); } #else static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { } #endif void btrfs_unlock_up_safe(struct btrfs_path *path, int level); static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) { if (rw == BTRFS_WRITE_LOCK) btrfs_tree_unlock(eb); else if (rw == BTRFS_READ_LOCK) btrfs_tree_read_unlock(eb); else BUG(); } struct btrfs_drew_lock { atomic_t readers; atomic_t writers; wait_queue_head_t pending_writers; wait_queue_head_t pending_readers; }; void btrfs_drew_lock_init(struct btrfs_drew_lock *lock); void btrfs_drew_write_lock(struct btrfs_drew_lock *lock); bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock); void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock); void btrfs_drew_read_lock(struct btrfs_drew_lock *lock); void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock); #ifdef CONFIG_DEBUG_LOCK_ALLOC void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level); void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb); #else static inline void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level) { } static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb) { } #endif #endif
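The wait-event annotations above follow a fixed pattern: the owner struct embeds a lockdep map named <lock>_map, waiters call btrfs_might_wait_for_event() before blocking, and every thread that updates the condition holds the map as a reader while signaling. A hypothetical owner struct, not taken from the btrfs code, to make the owner->lock##_map convention concrete:

#include <linux/lockdep.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "locking.h"	/* for the btrfs_lockdep_* macros above */

/* Hypothetical example struct; "foo" is the wait event being annotated. */
struct my_owner {
	struct lockdep_map foo_map;
	wait_queue_head_t wait;
	bool cond;
};

static void my_owner_init(struct my_owner *owner)
{
	btrfs_lockdep_init_map(owner, foo);	/* sets up owner->foo_map */
	init_waitqueue_head(&owner->wait);
	owner->cond = false;
}

static void my_wait_side(struct my_owner *owner)
{
	/* Waiter: take/drop the map as a writer so lockdep records the dependency. */
	btrfs_might_wait_for_event(owner, foo);
	wait_event(owner->wait, owner->cond);
}

static void my_signal_side(struct my_owner *owner)
{
	/* Condition modifier: hold the map as a reader while signaling. */
	btrfs_lockdep_acquire(owner, foo);
	owner->cond = true;
	wake_up(&owner->wait);
	btrfs_lockdep_release(owner, foo);
}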
464 464 89 89 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MEMREMAP_H_ #define _LINUX_MEMREMAP_H_ #include <linux/mmzone.h> #include <linux/range.h> #include <linux/ioport.h> #include <linux/percpu-refcount.h> struct resource; struct device; /** * struct vmem_altmap - pre-allocated storage for vmemmap_populate * @base_pfn: base of the entire dev_pagemap mapping * @reserve: pages mapped, but reserved for driver use (relative to @base) * @free: free pages set aside in the mapping for memmap storage * @align: pages reserved to meet allocation alignments * @alloc: track pages consumed, private to vmemmap_populate() */ struct vmem_altmap { unsigned long base_pfn; const unsigned long end_pfn; const unsigned long reserve; unsigned long free; unsigned long align; unsigned long alloc; bool inaccessible; }; /* * Specialize ZONE_DEVICE memory into multiple types each has a different * usage. * * MEMORY_DEVICE_PRIVATE: * Device memory that is not directly addressable by the CPU: CPU can neither * read nor write private memory. In this case, we do still have struct pages * backing the device memory. Doing so simplifies the implementation, but it is * important to remember that there are certain points at which the struct page * must be treated as an opaque object, rather than a "normal" struct page. * * A more complete discussion of unaddressable memory may be found in * include/linux/hmm.h and Documentation/mm/hmm.rst. * * MEMORY_DEVICE_COHERENT: * Device memory that is cache coherent from device and CPU point of view. This * is used on platforms that have an advanced system bus (like CAPI or CXL). A * driver can hotplug the device memory using ZONE_DEVICE and with that memory * type. Any page of a process can be migrated to such memory. However no one * should be allowed to pin such memory so that it can always be evicted. * * MEMORY_DEVICE_FS_DAX: * Host memory that has similar access semantics as System RAM i.e. DMA * coherent and supports page pinning. In support of coordinating page * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a * wakeup event whenever a page is unpinned and becomes idle. This * wakeup is used to coordinate physical address space management (ex: * fs truncate/hole punch) vs pinned pages (ex: device dma). * * MEMORY_DEVICE_GENERIC: * Host memory that has similar access semantics as System RAM i.e. DMA * coherent and supports page pinning. This is for example used by DAX devices * that expose memory using a character device. * * MEMORY_DEVICE_PCI_P2PDMA: * Device memory residing in a PCI BAR intended for use with Peer-to-Peer * transactions. 
*/ enum memory_type { /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, MEMORY_DEVICE_COHERENT, MEMORY_DEVICE_FS_DAX, MEMORY_DEVICE_GENERIC, MEMORY_DEVICE_PCI_P2PDMA, }; struct dev_pagemap_ops { /* * Called once the page refcount reaches 0. The reference count will be * reset to one by the core code after the method is called to prepare * for handing out the page again. */ void (*page_free)(struct page *page); /* * Used for private (un-addressable) device memory only. Must migrate * the page back to a CPU accessible page. */ vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); /* * Handle the memory failure happens on a range of pfns. Notify the * processes who are using these pfns, and try to recover the data on * them if necessary. The mf_flags is finally passed to the recover * function through the whole notify routine. * * When this is not implemented, or it returns -EOPNOTSUPP, the caller * will fall back to a common handler called mf_generic_kill_procs(). */ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, unsigned long nr_pages, int mf_flags); }; #define PGMAP_ALTMAP_VALID (1 << 0) /** * struct dev_pagemap - metadata for ZONE_DEVICE mappings * @altmap: pre-allocated/reserved memory for vmemmap allocations * @ref: reference count that pins the devm_memremap_pages() mapping * @done: completion for @ref * @type: memory type: see MEMORY_* above in memremap.h * @flags: PGMAP_* flags to specify defailed behavior * @vmemmap_shift: structural definition of how the vmemmap page metadata * is populated, specifically the metadata page order. * A zero value (default) uses base pages as the vmemmap metadata * representation. A bigger value will set up compound struct pages * of the requested order value. * @ops: method table * @owner: an opaque pointer identifying the entity that manages this * instance. Used by various helpers to make sure that no * foreign ZONE_DEVICE memory is accessed. 
* @nr_range: number of ranges to be mapped * @range: range to be mapped when nr_range == 1 * @ranges: array of ranges to be mapped when nr_range > 1 */ struct dev_pagemap { struct vmem_altmap altmap; struct percpu_ref ref; struct completion done; enum memory_type type; unsigned int flags; unsigned long vmemmap_shift; const struct dev_pagemap_ops *ops; void *owner; int nr_range; union { struct range range; DECLARE_FLEX_ARRAY(struct range, ranges); }; }; static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap) { return pgmap->ops && pgmap->ops->memory_failure; } static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) { if (pgmap->flags & PGMAP_ALTMAP_VALID) return &pgmap->altmap; return NULL; } static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap) { return 1 << pgmap->vmemmap_shift; } static inline bool is_device_private_page(const struct page *page) { return IS_ENABLED(CONFIG_DEVICE_PRIVATE) && is_zone_device_page(page) && page->pgmap->type == MEMORY_DEVICE_PRIVATE; } static inline bool folio_is_device_private(const struct folio *folio) { return is_device_private_page(&folio->page); } static inline bool is_pci_p2pdma_page(const struct page *page) { return IS_ENABLED(CONFIG_PCI_P2PDMA) && is_zone_device_page(page) && page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; } static inline bool is_device_coherent_page(const struct page *page) { return is_zone_device_page(page) && page->pgmap->type == MEMORY_DEVICE_COHERENT; } static inline bool folio_is_device_coherent(const struct folio *folio) { return is_device_coherent_page(&folio->page); } #ifdef CONFIG_ZONE_DEVICE void zone_device_page_init(struct page *page); void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap); bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); unsigned long memremap_compat_align(void); #else static inline void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) { /* * Fail attempts to call devm_memremap_pages() without * ZONE_DEVICE support enabled, this requires callers to fall * back to plain devm_memremap() based on config */ WARN_ON_ONCE(1); return ERR_PTR(-ENXIO); } static inline void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap) { } static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap) { return NULL; } static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) { return false; } /* when memremap_pages() is disabled all archs can remap a single page */ static inline unsigned long memremap_compat_align(void) { return PAGE_SIZE; } #endif /* CONFIG_ZONE_DEVICE */ static inline void put_dev_pagemap(struct dev_pagemap *pgmap) { if (pgmap) percpu_ref_put(&pgmap->ref); } #endif /* _LINUX_MEMREMAP_H_ */
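Putting the pieces above together, a driver typically fills a struct dev_pagemap with a memory type, a single physical range with nr_range = 1, and then hands it to devm_memremap_pages() to get struct pages for that range. A hedged sketch assuming MEMORY_DEVICE_GENERIC memory; the helper name and the caller-supplied start/size are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>

/* Hypothetical helper: map [start, start + size) as ZONE_DEVICE pages. */
static void *my_map_device_memory(struct device *dev, u64 start, u64 size)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->type = MEMORY_DEVICE_GENERIC;	/* DAX-like, pinnable memory */
	pgmap->range.start = start;
	pgmap->range.end = start + size - 1;	/* struct range end is inclusive */
	pgmap->nr_range = 1;

	/*
	 * Returns the kernel virtual address of the mapping, or an ERR_PTR
	 * (e.g. -ENXIO when ZONE_DEVICE is not configured, per the stub above).
	 */
	return devm_memremap_pages(dev, pgmap);
}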
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>
#include <linux/fsnotify.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sync.h"

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	sync->flags = READ_ONCE(sqe->sync_range_flags);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* sync_file_range always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(sync->flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	loff_t end = sync->off + sync->len;
	int ret;

	/* fsync always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
			      sync->flags & IORING_FSYNC_DATASYNC);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->addr);
	sync->mode = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* fallocate always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
	if (ret >= 0)
		fsnotify_modify(req->file);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
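From userspace, the handlers above are reached through IORING_OP_FSYNC, IORING_OP_SYNC_FILE_RANGE and IORING_OP_FALLOCATE submissions. A minimal sketch of the fsync path, assuming liburing is available and using a hypothetical file name:

#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd = open("testfile", O_WRONLY | O_CREAT, 0644);

	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* Serviced by io_fsync() above; the flag selects the datasync variant. */
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("fsync result: %d\n", cqe->res);	/* 0 on success */
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}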
/*
 *  linux/drivers/video/fb_notify.c
 *
 *  Copyright (C) 2006 Antonino Daplas <adaplas@pol.net>
 *
 *	2001 - Documented with DocBook
 *	- Brad Douglas <brad@neruo.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License. See the file COPYING in the main directory of this archive
 *  for more details.
 */
#include <linux/fb.h>
#include <linux/notifier.h>
#include <linux/export.h>

static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);

/**
 * fb_register_client - register a client notifier
 * @nb: notifier block to callback on events
 *
 * Return: 0 on success, negative error code on failure.
 */
int fb_register_client(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&fb_notifier_list, nb);
}
EXPORT_SYMBOL(fb_register_client);

/**
 * fb_unregister_client - unregister a client notifier
 * @nb: notifier block to callback on events
 *
 * Return: 0 on success, negative error code on failure.
 */
int fb_unregister_client(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&fb_notifier_list, nb);
}
EXPORT_SYMBOL(fb_unregister_client);

/**
 * fb_notifier_call_chain - notify clients of fb_events
 * @val: value passed to callback
 * @v: pointer passed to callback
 *
 * Return: The return value of the last notifier function
 */
int fb_notifier_call_chain(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&fb_notifier_list, val, v);
}
EXPORT_SYMBOL_GPL(fb_notifier_call_chain);
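A client of this notifier chain only needs a struct notifier_block with a callback of the standard notifier signature; a hypothetical module (names are illustrative, not from the tree) registering and unregistering itself:

#include <linux/fb.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int my_fb_event(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	/* For fb notifications, 'data' points to a struct fb_event. */
	struct fb_event *event = data;

	pr_info("fb event %lu on fb%d\n", action, event->info->node);
	return NOTIFY_OK;
}

static struct notifier_block my_fb_nb = {
	.notifier_call = my_fb_event,
};

static int __init my_fb_client_init(void)
{
	return fb_register_client(&my_fb_nb);
}

static void __exit my_fb_client_exit(void)
{
	fb_unregister_client(&my_fb_nb);
}

module_init(my_fb_client_init);
module_exit(my_fb_client_exit);
MODULE_LICENSE("GPL");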
7 6 1 7 1 4 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 /* * super.c * * Copyright (C) 2001-2002 Will Dyson <will_dyson@pobox.com> * * Licensed under the GNU GPL. See the file COPYING for details. * */ #include <linux/fs.h> #include <asm/page.h> /* for PAGE_SIZE */ #include "befs.h" #include "super.h" /* * befs_load_sb -- Read from disk and properly byteswap all the fields * of the befs superblock */ int befs_load_sb(struct super_block *sb, befs_super_block *disk_sb) { struct befs_sb_info *befs_sb = BEFS_SB(sb); /* Check the byte order of the filesystem */ if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_LE) befs_sb->byte_order = BEFS_BYTESEX_LE; else if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_BE) befs_sb->byte_order = BEFS_BYTESEX_BE; befs_sb->magic1 = fs32_to_cpu(sb, disk_sb->magic1); befs_sb->magic2 = fs32_to_cpu(sb, disk_sb->magic2); befs_sb->magic3 = fs32_to_cpu(sb, disk_sb->magic3); befs_sb->block_size = fs32_to_cpu(sb, disk_sb->block_size); befs_sb->block_shift = fs32_to_cpu(sb, disk_sb->block_shift); befs_sb->num_blocks = fs64_to_cpu(sb, disk_sb->num_blocks); befs_sb->used_blocks = fs64_to_cpu(sb, disk_sb->used_blocks); befs_sb->inode_size = fs32_to_cpu(sb, disk_sb->inode_size); befs_sb->blocks_per_ag = fs32_to_cpu(sb, disk_sb->blocks_per_ag); befs_sb->ag_shift = fs32_to_cpu(sb, disk_sb->ag_shift); befs_sb->num_ags = fs32_to_cpu(sb, disk_sb->num_ags); befs_sb->flags = fs32_to_cpu(sb, disk_sb->flags); befs_sb->log_blocks = fsrun_to_cpu(sb, disk_sb->log_blocks); befs_sb->log_start = fs64_to_cpu(sb, disk_sb->log_start); befs_sb->log_end = fs64_to_cpu(sb, disk_sb->log_end); befs_sb->root_dir = fsrun_to_cpu(sb, disk_sb->root_dir); befs_sb->indices = fsrun_to_cpu(sb, disk_sb->indices); befs_sb->nls = NULL; return BEFS_OK; } int befs_check_sb(struct super_block *sb) { struct befs_sb_info *befs_sb = BEFS_SB(sb); /* Check magic headers of super block */ if ((befs_sb->magic1 != BEFS_SUPER_MAGIC1) || (befs_sb->magic2 != BEFS_SUPER_MAGIC2) || (befs_sb->magic3 != BEFS_SUPER_MAGIC3)) { befs_error(sb, "invalid magic header"); return BEFS_ERR; } /* * Check blocksize of BEFS. * * Blocksize of BEFS is 1024, 2048, 4096 or 8192. */ if ((befs_sb->block_size != 1024) && (befs_sb->block_size != 2048) && (befs_sb->block_size != 4096) && (befs_sb->block_size != 8192)) { befs_error(sb, "invalid blocksize: %u", befs_sb->block_size); return BEFS_ERR; } if (befs_sb->block_size > PAGE_SIZE) { befs_error(sb, "blocksize(%u) cannot be larger " "than system pagesize(%lu)", befs_sb->block_size, PAGE_SIZE); return BEFS_ERR; } /* * block_shift and block_size encode the same information * in different ways as a consistency check. */ if ((1 << befs_sb->block_shift) != befs_sb->block_size) { befs_error(sb, "block_shift disagrees with block_size. " "Corruption likely."); return BEFS_ERR; } /* ag_shift also encodes the same information as blocks_per_ag in a * different way, non-fatal consistency check */ if ((1 << befs_sb->ag_shift) != befs_sb->blocks_per_ag) befs_error(sb, "ag_shift disagrees with blocks_per_ag."); if (befs_sb->log_start != befs_sb->log_end || befs_sb->flags == BEFS_DIRTY) { befs_error(sb, "Filesystem not clean! There are blocks in the " "journal. 
You must boot into BeOS and mount this " "volume to make it clean."); return BEFS_ERR; } return BEFS_OK; }
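The two consistency checks above (block_size against block_shift, blocks_per_ag against ag_shift) boil down to requiring that the shift be the exact log2 of the size. A standalone restatement of the block-size rule, not part of the filesystem code:

#include <stdbool.h>
#include <stdint.h>

/* Mirror of the befs_check_sb() block size rules, for illustration. */
static bool befs_block_size_ok(uint32_t block_size, uint32_t block_shift)
{
	if (block_size != 1024 && block_size != 2048 &&
	    block_size != 4096 && block_size != 8192)
		return false;				/* only these sizes are valid */

	return (UINT32_C(1) << block_shift) == block_size;	/* shift must match size */
}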
1 1 1 1 2 2 2 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 // SPDX-License-Identifier: GPL-2.0+ /* * comedi/drivers/ni_usb6501.c * Comedi driver for National Instruments USB-6501 * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2014 Luca Ellero <luca.ellero@brickedbrain.com> */ /* * Driver: ni_usb6501 * Description: National Instruments USB-6501 module * Devices: [National Instruments] USB-6501 (ni_usb6501) * Author: Luca Ellero <luca.ellero@brickedbrain.com> * Updated: 8 Sep 2014 * Status: works * * * Configuration Options: * none */ /* * NI-6501 - USB PROTOCOL DESCRIPTION * * Every command is composed by two USB packets: * - request (out) * - response (in) * * Every packet is at least 12 bytes long, here is the meaning of * every field (all values are hex): * * byte 0 is always 00 * byte 1 is always 01 * byte 2 is always 00 * byte 3 is the total packet length * * byte 4 is always 00 * byte 5 is the total packet length - 4 * byte 6 is always 01 * byte 7 is the command * * byte 8 is 02 (request) or 00 (response) * byte 9 is 00 (response) or 10 (port request) or 20 (counter request) * byte 10 is always 00 * byte 11 is 00 (request) or 02 (response) * * PORT PACKETS * * CMD: 0xE READ_PORT * REQ: 00 01 00 10 00 0C 01 
0E 02 10 00 00 00 03 <PORT> 00 * RES: 00 01 00 10 00 0C 01 00 00 00 00 02 00 03 <BMAP> 00 * * CMD: 0xF WRITE_PORT * REQ: 00 01 00 14 00 10 01 0F 02 10 00 00 00 03 <PORT> 00 03 <BMAP> 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD: 0x12 SET_PORT_DIR (0 = input, 1 = output) * REQ: 00 01 00 18 00 14 01 12 02 10 00 00 * 00 05 <PORT 0> <PORT 1> <PORT 2> 00 05 00 00 00 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * COUNTER PACKETS * * CMD 0x9: START_COUNTER * REQ: 00 01 00 0C 00 08 01 09 02 20 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD 0xC: STOP_COUNTER * REQ: 00 01 00 0C 00 08 01 0C 02 20 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD 0xE: READ_COUNTER * REQ: 00 01 00 0C 00 08 01 0E 02 20 00 00 * RES: 00 01 00 10 00 0C 01 00 00 00 00 02 <u32 counter value, Big Endian> * * CMD 0xF: WRITE_COUNTER * REQ: 00 01 00 10 00 0C 01 0F 02 20 00 00 <u32 counter value, Big Endian> * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * * Please visit https://www.brickedbrain.com if you need * additional information or have any questions. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/comedi/comedi_usb.h> #define NI6501_TIMEOUT 1000 /* Port request packets */ static const u8 READ_PORT_REQUEST[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x0E, 0x02, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00}; static const u8 WRITE_PORT_REQUEST[] = {0x00, 0x01, 0x00, 0x14, 0x00, 0x10, 0x01, 0x0F, 0x02, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00}; static const u8 SET_PORT_DIR_REQUEST[] = {0x00, 0x01, 0x00, 0x18, 0x00, 0x14, 0x01, 0x12, 0x02, 0x10, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00}; /* Counter request packets */ static const u8 START_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x09, 0x02, 0x20, 0x00, 0x00}; static const u8 STOP_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x0C, 0x02, 0x20, 0x00, 0x00}; static const u8 READ_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x0E, 0x02, 0x20, 0x00, 0x00}; static const u8 WRITE_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x0F, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; /* Response packets */ static const u8 GENERIC_RESPONSE[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02}; static const u8 READ_PORT_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x03, 0x00, 0x00}; static const u8 READ_COUNTER_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}; /* Largest supported packets */ static const size_t TX_MAX_SIZE = sizeof(SET_PORT_DIR_REQUEST); static const size_t RX_MAX_SIZE = sizeof(READ_PORT_RESPONSE); enum commands { READ_PORT, WRITE_PORT, SET_PORT_DIR, START_COUNTER, STOP_COUNTER, READ_COUNTER, WRITE_COUNTER }; struct ni6501_private { struct usb_endpoint_descriptor *ep_rx; struct usb_endpoint_descriptor *ep_tx; struct mutex mut; u8 *usb_rx_buf; u8 *usb_tx_buf; }; static int ni6501_port_command(struct comedi_device *dev, int command, unsigned int val, u8 *bitmap) { struct usb_device *usb = comedi_to_usb_dev(dev); struct ni6501_private *devpriv = dev->private; int request_size, response_size; u8 *tx = devpriv->usb_tx_buf; int ret; if (command != SET_PORT_DIR && !bitmap) return -EINVAL; mutex_lock(&devpriv->mut); switch (command) { case READ_PORT: request_size = sizeof(READ_PORT_REQUEST); response_size = sizeof(READ_PORT_RESPONSE); 
memcpy(tx, READ_PORT_REQUEST, request_size); tx[14] = val & 0xff; break; case WRITE_PORT: request_size = sizeof(WRITE_PORT_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, WRITE_PORT_REQUEST, request_size); tx[14] = val & 0xff; tx[17] = *bitmap; break; case SET_PORT_DIR: request_size = sizeof(SET_PORT_DIR_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, SET_PORT_DIR_REQUEST, request_size); tx[14] = val & 0xff; tx[15] = (val >> 8) & 0xff; tx[16] = (val >> 16) & 0xff; break; default: ret = -EINVAL; goto end; } ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->ep_tx->bEndpointAddress), devpriv->usb_tx_buf, request_size, NULL, NI6501_TIMEOUT); if (ret) goto end; ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->ep_rx->bEndpointAddress), devpriv->usb_rx_buf, response_size, NULL, NI6501_TIMEOUT); if (ret) goto end; /* Check if results are valid */ if (command == READ_PORT) { *bitmap = devpriv->usb_rx_buf[14]; /* mask bitmap for comparing */ devpriv->usb_rx_buf[14] = 0x00; if (memcmp(devpriv->usb_rx_buf, READ_PORT_RESPONSE, sizeof(READ_PORT_RESPONSE))) { ret = -EINVAL; } } else if (memcmp(devpriv->usb_rx_buf, GENERIC_RESPONSE, sizeof(GENERIC_RESPONSE))) { ret = -EINVAL; } end: mutex_unlock(&devpriv->mut); return ret; } static int ni6501_counter_command(struct comedi_device *dev, int command, u32 *val) { struct usb_device *usb = comedi_to_usb_dev(dev); struct ni6501_private *devpriv = dev->private; int request_size, response_size; u8 *tx = devpriv->usb_tx_buf; int ret; if ((command == READ_COUNTER || command == WRITE_COUNTER) && !val) return -EINVAL; mutex_lock(&devpriv->mut); switch (command) { case START_COUNTER: request_size = sizeof(START_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, START_COUNTER_REQUEST, request_size); break; case STOP_COUNTER: request_size = sizeof(STOP_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, STOP_COUNTER_REQUEST, request_size); break; case READ_COUNTER: request_size = sizeof(READ_COUNTER_REQUEST); response_size = sizeof(READ_COUNTER_RESPONSE); memcpy(tx, READ_COUNTER_REQUEST, request_size); break; case WRITE_COUNTER: request_size = sizeof(WRITE_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, WRITE_COUNTER_REQUEST, request_size); /* Setup tx packet: bytes 12,13,14,15 hold the */ /* u32 counter value (Big Endian) */ *((__be32 *)&tx[12]) = cpu_to_be32(*val); break; default: ret = -EINVAL; goto end; } ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->ep_tx->bEndpointAddress), devpriv->usb_tx_buf, request_size, NULL, NI6501_TIMEOUT); if (ret) goto end; ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->ep_rx->bEndpointAddress), devpriv->usb_rx_buf, response_size, NULL, NI6501_TIMEOUT); if (ret) goto end; /* Check if results are valid */ if (command == READ_COUNTER) { int i; /* Read counter value: bytes 12,13,14,15 of rx packet */ /* hold the u32 counter value (Big Endian) */ *val = be32_to_cpu(*((__be32 *)&devpriv->usb_rx_buf[12])); /* mask counter value for comparing */ for (i = 12; i < sizeof(READ_COUNTER_RESPONSE); ++i) devpriv->usb_rx_buf[i] = 0x00; if (memcmp(devpriv->usb_rx_buf, READ_COUNTER_RESPONSE, sizeof(READ_COUNTER_RESPONSE))) { ret = -EINVAL; } } else if (memcmp(devpriv->usb_rx_buf, GENERIC_RESPONSE, sizeof(GENERIC_RESPONSE))) { ret = -EINVAL; } end: mutex_unlock(&devpriv->mut); return ret; } static int ni6501_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; ret = 
comedi_dio_insn_config(dev, s, insn, data, 0); if (ret) return ret; ret = ni6501_port_command(dev, SET_PORT_DIR, s->io_bits, NULL); if (ret) return ret; return insn->n; } static int ni6501_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask; int ret; u8 port; u8 bitmap; mask = comedi_dio_update_state(s, data); for (port = 0; port < 3; port++) { if (mask & (0xFF << port * 8)) { bitmap = (s->state >> port * 8) & 0xFF; ret = ni6501_port_command(dev, WRITE_PORT, port, &bitmap); if (ret) return ret; } } data[1] = 0; for (port = 0; port < 3; port++) { ret = ni6501_port_command(dev, READ_PORT, port, &bitmap); if (ret) return ret; data[1] |= bitmap << port * 8; } return insn->n; } static int ni6501_cnt_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; u32 val = 0; switch (data[0]) { case INSN_CONFIG_ARM: ret = ni6501_counter_command(dev, START_COUNTER, NULL); break; case INSN_CONFIG_DISARM: ret = ni6501_counter_command(dev, STOP_COUNTER, NULL); break; case INSN_CONFIG_RESET: ret = ni6501_counter_command(dev, STOP_COUNTER, NULL); if (ret) break; ret = ni6501_counter_command(dev, WRITE_COUNTER, &val); break; default: return -EINVAL; } return ret ? ret : insn->n; } static int ni6501_cnt_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; u32 val; unsigned int i; for (i = 0; i < insn->n; i++) { ret = ni6501_counter_command(dev, READ_COUNTER, &val); if (ret) return ret; data[i] = val; } return insn->n; } static int ni6501_cnt_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; if (insn->n) { u32 val = data[insn->n - 1]; ret = ni6501_counter_command(dev, WRITE_COUNTER, &val); if (ret) return ret; } return insn->n; } static int ni6501_alloc_usb_buffers(struct comedi_device *dev) { struct ni6501_private *devpriv = dev->private; size_t size; size = usb_endpoint_maxp(devpriv->ep_rx); devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_rx_buf) return -ENOMEM; size = usb_endpoint_maxp(devpriv->ep_tx); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; return 0; } static int ni6501_find_endpoints(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv = dev->private; struct usb_host_interface *iface_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *ep_desc; int i; if (iface_desc->desc.bNumEndpoints != 2) { dev_err(dev->class_dev, "Wrong number of endpoints\n"); return -ENODEV; } for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { ep_desc = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc)) { if (!devpriv->ep_rx) devpriv->ep_rx = ep_desc; continue; } if (usb_endpoint_is_bulk_out(ep_desc)) { if (!devpriv->ep_tx) devpriv->ep_tx = ep_desc; continue; } } if (!devpriv->ep_rx || !devpriv->ep_tx) return -ENODEV; if (usb_endpoint_maxp(devpriv->ep_rx) < RX_MAX_SIZE) return -ENODEV; if (usb_endpoint_maxp(devpriv->ep_tx) < TX_MAX_SIZE) return -ENODEV; return 0; } static int ni6501_auto_attach(struct comedi_device *dev, unsigned long context) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv; struct comedi_subdevice *s; int ret; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; mutex_init(&devpriv->mut); 
usb_set_intfdata(intf, devpriv); ret = ni6501_find_endpoints(dev); if (ret) return ret; ret = ni6501_alloc_usb_buffers(dev); if (ret) return ret; ret = comedi_alloc_subdevices(dev, 2); if (ret) return ret; /* Digital Input/Output subdevice */ s = &dev->subdevices[0]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 24; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = ni6501_dio_insn_bits; s->insn_config = ni6501_dio_insn_config; /* Counter subdevice */ s = &dev->subdevices[1]; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL; s->n_chan = 1; s->maxdata = 0xffffffff; s->insn_read = ni6501_cnt_insn_read; s->insn_write = ni6501_cnt_insn_write; s->insn_config = ni6501_cnt_insn_config; return 0; } static void ni6501_detach(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv = dev->private; if (!devpriv) return; mutex_destroy(&devpriv->mut); usb_set_intfdata(intf, NULL); kfree(devpriv->usb_rx_buf); kfree(devpriv->usb_tx_buf); } static struct comedi_driver ni6501_driver = { .module = THIS_MODULE, .driver_name = "ni6501", .auto_attach = ni6501_auto_attach, .detach = ni6501_detach, }; static int ni6501_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return comedi_usb_auto_config(intf, &ni6501_driver, id->driver_info); } static const struct usb_device_id ni6501_usb_table[] = { { USB_DEVICE(0x3923, 0x718a) }, { } }; MODULE_DEVICE_TABLE(usb, ni6501_usb_table); static struct usb_driver ni6501_usb_driver = { .name = "ni6501", .id_table = ni6501_usb_table, .probe = ni6501_usb_probe, .disconnect = comedi_usb_auto_unconfig, }; module_comedi_usb_driver(ni6501_driver, ni6501_usb_driver); MODULE_AUTHOR("Luca Ellero"); MODULE_DESCRIPTION("Comedi driver for National Instruments USB-6501"); MODULE_LICENSE("GPL");
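The request templates in this driver are literal encodings of the packet layout documented in the header comment: byte 3 carries the total packet length, byte 5 the length minus four, byte 7 the command, and byte 14 the port for READ_PORT. A standalone sketch (not part of the driver) that rebuilds the READ_PORT request and checks those fields:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void build_read_port_request(uint8_t port, uint8_t req[16])
{
	/* Template taken from the READ_PORT packet documented above. */
	static const uint8_t tmpl[16] = {
		0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x0E,
		0x02, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00
	};

	memcpy(req, tmpl, sizeof(tmpl));
	req[14] = port;				/* <PORT>: 0, 1 or 2 */

	assert(req[3] == sizeof(tmpl));		/* byte 3: total packet length */
	assert(req[5] == sizeof(tmpl) - 4);	/* byte 5: total length - 4 */
	assert(req[7] == 0x0E);			/* byte 7: READ_PORT command */
}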
158 158 20 6 14 15 15 11 4 1 1 10 5 3 3 1 2 1 1 1 1 1 1 1 3 2 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/io_uring.h> #include <uapi/linux/io_uring.h> #include "../kernel/futex/futex.h" #include "io_uring.h" #include "alloc_cache.h" #include "futex.h" struct io_futex { struct file *file; union { u32 __user *uaddr; struct futex_waitv __user *uwaitv; }; unsigned long futex_val; unsigned long futex_mask; unsigned long futexv_owned; u32 futex_flags; unsigned int futex_nr; bool futexv_unqueued; }; struct io_futex_data { struct futex_q q; struct io_kiocb *req; }; #define IO_FUTEX_ALLOC_CACHE_MAX 32 bool io_futex_cache_init(struct io_ring_ctx *ctx) { return io_alloc_cache_init(&ctx->futex_cache, IO_FUTEX_ALLOC_CACHE_MAX, sizeof(struct io_futex_data)); } void io_futex_cache_free(struct io_ring_ctx *ctx) { io_alloc_cache_free(&ctx->futex_cache, kfree); } static void __io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts) { req->async_data = NULL; hlist_del_init(&req->hash_node); io_req_task_complete(req, ts); } static void io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts) { struct io_futex_data *ifd = req->async_data; struct io_ring_ctx *ctx = req->ctx; io_tw_lock(ctx, ts); if (!io_alloc_cache_put(&ctx->futex_cache, ifd)) kfree(ifd); __io_futex_complete(req, ts); } static void io_futexv_complete(struct io_kiocb *req, struct io_tw_state *ts) { struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); struct futex_vector *futexv = req->async_data; io_tw_lock(req->ctx, ts); if (!iof->futexv_unqueued) { int res; res = futex_unqueue_multiple(futexv, iof->futex_nr); if (res != -1) io_req_set_res(req, res, 0); } kfree(req->async_data); req->flags &= ~REQ_F_ASYNC_DATA; __io_futex_complete(req, ts); } static bool io_futexv_claim(struct io_futex *iof) { if (test_bit(0, &iof->futexv_owned) || test_and_set_bit_lock(0, &iof->futexv_owned)) return false; return true; } static bool __io_futex_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req) { /* futex wake already done or in progress */ 
if (req->opcode == IORING_OP_FUTEX_WAIT) { struct io_futex_data *ifd = req->async_data; if (!futex_unqueue(&ifd->q)) return false; req->io_task_work.func = io_futex_complete; } else { struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); if (!io_futexv_claim(iof)) return false; req->io_task_work.func = io_futexv_complete; } hlist_del_init(&req->hash_node); io_req_set_res(req, -ECANCELED, 0); io_req_task_work_add(req); return true; } int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, unsigned int issue_flags) { struct hlist_node *tmp; struct io_kiocb *req; int nr = 0; if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_FD_FIXED)) return -ENOENT; io_ring_submit_lock(ctx, issue_flags); hlist_for_each_entry_safe(req, tmp, &ctx->futex_list, hash_node) { if (req->cqe.user_data != cd->data && !(cd->flags & IORING_ASYNC_CANCEL_ANY)) continue; if (__io_futex_cancel(ctx, req)) nr++; if (!(cd->flags & IORING_ASYNC_CANCEL_ALL)) break; } io_ring_submit_unlock(ctx, issue_flags); if (nr) return nr; return -ENOENT; } bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task, bool cancel_all) { struct hlist_node *tmp; struct io_kiocb *req; bool found = false; lockdep_assert_held(&ctx->uring_lock); hlist_for_each_entry_safe(req, tmp, &ctx->futex_list, hash_node) { if (!io_match_task_safe(req, task, cancel_all)) continue; hlist_del_init(&req->hash_node); __io_futex_cancel(ctx, req); found = true; } return found; } int io_futex_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); u32 flags; if (unlikely(sqe->len || sqe->futex_flags || sqe->buf_index || sqe->file_index)) return -EINVAL; iof->uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr)); iof->futex_val = READ_ONCE(sqe->addr2); iof->futex_mask = READ_ONCE(sqe->addr3); flags = READ_ONCE(sqe->fd); if (flags & ~FUTEX2_VALID_MASK) return -EINVAL; iof->futex_flags = futex2_to_flags(flags); if (!futex_flags_valid(iof->futex_flags)) return -EINVAL; if (!futex_validate_input(iof->futex_flags, iof->futex_val) || !futex_validate_input(iof->futex_flags, iof->futex_mask)) return -EINVAL; return 0; } static void io_futex_wakev_fn(struct wake_q_head *wake_q, struct futex_q *q) { struct io_kiocb *req = q->wake_data; struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); if (!io_futexv_claim(iof)) return; if (unlikely(!__futex_wake_mark(q))) return; io_req_set_res(req, 0, 0); req->io_task_work.func = io_futexv_complete; io_req_task_work_add(req); } int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); struct futex_vector *futexv; int ret; /* No flags or mask supported for waitv */ if (unlikely(sqe->fd || sqe->buf_index || sqe->file_index || sqe->addr2 || sqe->futex_flags || sqe->addr3)) return -EINVAL; iof->uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr)); iof->futex_nr = READ_ONCE(sqe->len); if (!iof->futex_nr || iof->futex_nr > FUTEX_WAITV_MAX) return -EINVAL; futexv = kcalloc(iof->futex_nr, sizeof(*futexv), GFP_KERNEL); if (!futexv) return -ENOMEM; ret = futex_parse_waitv(futexv, iof->uwaitv, iof->futex_nr, io_futex_wakev_fn, req); if (ret) { kfree(futexv); return ret; } iof->futexv_owned = 0; iof->futexv_unqueued = 0; req->flags |= REQ_F_ASYNC_DATA; req->async_data = futexv; return 0; } static void io_futex_wake_fn(struct wake_q_head *wake_q, struct futex_q *q) { struct io_futex_data *ifd = container_of(q, struct io_futex_data, q); struct io_kiocb *req = ifd->req; 
if (unlikely(!__futex_wake_mark(q))) return; io_req_set_res(req, 0, 0); req->io_task_work.func = io_futex_complete; io_req_task_work_add(req); } static struct io_futex_data *io_alloc_ifd(struct io_ring_ctx *ctx) { struct io_futex_data *ifd; ifd = io_alloc_cache_get(&ctx->futex_cache); if (ifd) return ifd; return kmalloc(sizeof(struct io_futex_data), GFP_NOWAIT); } int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags) { struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); struct futex_vector *futexv = req->async_data; struct io_ring_ctx *ctx = req->ctx; int ret, woken = -1; io_ring_submit_lock(ctx, issue_flags); ret = futex_wait_multiple_setup(futexv, iof->futex_nr, &woken); /* * Error case, ret is < 0. Mark the request as failed. */ if (unlikely(ret < 0)) { io_ring_submit_unlock(ctx, issue_flags); req_set_fail(req); io_req_set_res(req, ret, 0); kfree(futexv); req->async_data = NULL; req->flags &= ~REQ_F_ASYNC_DATA; return IOU_OK; } /* * 0 return means that we successfully setup the waiters, and that * nobody triggered a wakeup while we were doing so. If the wakeup * happened post setup, the task_work will be run post this issue and * under the submission lock. 1 means We got woken while setting up, * let that side do the completion. Note that * futex_wait_multiple_setup() will have unqueued all the futexes in * this case. Mark us as having done that already, since this is * different from normal wakeup. */ if (!ret) { /* * If futex_wait_multiple_setup() returns 0 for a * successful setup, then the task state will not be * runnable. This is fine for the sync syscall, as * it'll be blocking unless we already got one of the * futexes woken, but it obviously won't work for an * async invocation. Mark us runnable again. */ __set_current_state(TASK_RUNNING); hlist_add_head(&req->hash_node, &ctx->futex_list); } else { iof->futexv_unqueued = 1; if (woken != -1) io_req_set_res(req, woken, 0); } io_ring_submit_unlock(ctx, issue_flags); return IOU_ISSUE_SKIP_COMPLETE; } int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags) { struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); struct io_ring_ctx *ctx = req->ctx; struct io_futex_data *ifd = NULL; struct futex_hash_bucket *hb; int ret; if (!iof->futex_mask) { ret = -EINVAL; goto done; } io_ring_submit_lock(ctx, issue_flags); ifd = io_alloc_ifd(ctx); if (!ifd) { ret = -ENOMEM; goto done_unlock; } req->async_data = ifd; ifd->q = futex_q_init; ifd->q.bitset = iof->futex_mask; ifd->q.wake = io_futex_wake_fn; ifd->req = req; ret = futex_wait_setup(iof->uaddr, iof->futex_val, iof->futex_flags, &ifd->q, &hb); if (!ret) { hlist_add_head(&req->hash_node, &ctx->futex_list); io_ring_submit_unlock(ctx, issue_flags); futex_queue(&ifd->q, hb); return IOU_ISSUE_SKIP_COMPLETE; } done_unlock: io_ring_submit_unlock(ctx, issue_flags); done: if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); kfree(ifd); return IOU_OK; } int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags) { struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); int ret; /* * Strict flags - ensure that waking 0 futexes yields a 0 result. * See commit 43adf8449510 ("futex: FLAGS_STRICT") for details. */ ret = futex_wake(iof->uaddr, FLAGS_STRICT | iof->futex_flags, iof->futex_val, iof->futex_mask); if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); return IOU_OK; }
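io_futex_prep() above fixes the SQE encoding for IORING_OP_FUTEX_WAIT: addr is the futex address, addr2 the expected value, addr3 the wake mask, and fd carries the FUTEX2 flags. A hedged userspace fragment that fills an SQE by hand following that mapping; it assumes liburing only for obtaining the SQE, and prep_futex_wait_raw is an illustrative name, not a liburing helper:

#include <liburing.h>
#include <linux/futex.h>
#include <stdint.h>
#include <string.h>

/* Mirror the field mapping in io_futex_prep() above. */
static void prep_futex_wait_raw(struct io_uring_sqe *sqe, uint32_t *futex_word,
				uint64_t expected_val)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FUTEX_WAIT;
	sqe->fd = FUTEX2_SIZE_U32;			/* futex2 flags, read via sqe->fd */
	sqe->addr = (uintptr_t)futex_word;		/* iof->uaddr */
	sqe->addr2 = expected_val;			/* iof->futex_val */
	sqe->addr3 = FUTEX_BITSET_MATCH_ANY;		/* iof->futex_mask; must be non-zero */
}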
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

#if IS_ENABLED(CONFIG_DEBUG_FS)
static DEFINE_MUTEX(debugfs_list_mutex);
static LIST_HEAD(debugfs_list);

static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
	mutex_lock(&debugfs_list_mutex);
	list_add(&dmabuf->list_node, &debugfs_list);
	mutex_unlock(&debugfs_list_mutex);
}

static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
{
	if (!dmabuf)
		return;

	mutex_lock(&debugfs_list_mutex);
	list_del(&dmabuf->list_node);
	mutex_unlock(&debugfs_list_mutex);
}
#else
static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
}

static void __dma_buf_debugfs_list_del(struct file *file)
{
}
#endif

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	ssize_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strscpy(name, dmabuf->name, sizeof(name));
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ?
name : ""); } static void dma_buf_release(struct dentry *dentry) { struct dma_buf *dmabuf; dmabuf = dentry->d_fsdata; if (unlikely(!dmabuf)) return; BUG_ON(dmabuf->vmapping_counter); /* * If you hit this BUG() it could mean: * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback */ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active); dma_buf_stats_teardown(dmabuf); dmabuf->ops->release(dmabuf); if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) dma_resv_fini(dmabuf->resv); WARN_ON(!list_empty(&dmabuf->attachments)); module_put(dmabuf->owner); kfree(dmabuf->name); kfree(dmabuf); } static int dma_buf_file_release(struct inode *inode, struct file *file) { if (!is_dma_buf_file(file)) return -EINVAL; __dma_buf_debugfs_list_del(file->private_data); return 0; } static const struct dentry_operations dma_buf_dentry_ops = { .d_dname = dmabuffs_dname, .d_release = dma_buf_release, }; static struct vfsmount *dma_buf_mnt; static int dma_buf_fs_init_context(struct fs_context *fc) { struct pseudo_fs_context *ctx; ctx = init_pseudo(fc, DMA_BUF_MAGIC); if (!ctx) return -ENOMEM; ctx->dops = &dma_buf_dentry_ops; return 0; } static struct file_system_type dma_buf_fs_type = { .name = "dmabuf", .init_fs_context = dma_buf_fs_init_context, .kill_sb = kill_anon_super, }; static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; if (!is_dma_buf_file(file)) return -EINVAL; dmabuf = file->private_data; /* check if buffer supports mmap */ if (!dmabuf->ops->mmap) return -EINVAL; /* check for overflowing the buffer's size */ if (vma->vm_pgoff + vma_pages(vma) > dmabuf->size >> PAGE_SHIFT) return -EINVAL; return dmabuf->ops->mmap(dmabuf, vma); } static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) { struct dma_buf *dmabuf; loff_t base; if (!is_dma_buf_file(file)) return -EBADF; dmabuf = file->private_data; /* only support discovering the end of the buffer, but also allow SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_CUR(0) pattern */ if (whence == SEEK_END) base = dmabuf->size; else if (whence == SEEK_SET) base = 0; else return -EINVAL; if (offset != 0) return -EINVAL; return base + offset; } /** * DOC: implicit fence polling * * To support cross-device and cross-driver synchronization of buffer access * implicit fences (represented internally in the kernel with &struct dma_fence) * can be attached to a &dma_buf. The glue for that and a few related things are * provided in the &dma_resv structure. * * Userspace can query the state of these implicitly tracked fences using poll() * and related system calls: * * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the * most recent write or exclusive fence. * * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of * all attached fences, shared and exclusive ones. * * Note that this only signals the completion of the respective fences, i.e. the * DMA transfers are complete. Cache flushing and any other necessary * preparations before CPU access can begin still need to happen. * * As an alternative to poll(), the set of fences on DMA buffer can be * exported as a &sync_file using &dma_buf_sync_file_export. 
*/ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll); unsigned long flags; spin_lock_irqsave(&dcb->poll->lock, flags); wake_up_locked_poll(dcb->poll, dcb->active); dcb->active = 0; spin_unlock_irqrestore(&dcb->poll->lock, flags); dma_fence_put(fence); /* Paired with get_file in dma_buf_poll */ fput(dmabuf->file); } static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write, struct dma_buf_poll_cb_t *dcb) { struct dma_resv_iter cursor; struct dma_fence *fence; int r; dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write), fence) { dma_fence_get(fence); r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); if (!r) return true; dma_fence_put(fence); } return false; } static __poll_t dma_buf_poll(struct file *file, poll_table *poll) { struct dma_buf *dmabuf; struct dma_resv *resv; __poll_t events; dmabuf = file->private_data; if (!dmabuf || !dmabuf->resv) return EPOLLERR; resv = dmabuf->resv; poll_wait(file, &dmabuf->poll, poll); events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT); if (!events) return 0; dma_resv_lock(resv, NULL); if (events & EPOLLOUT) { struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out; /* Check that callback isn't busy */ spin_lock_irq(&dmabuf->poll.lock); if (dcb->active) events &= ~EPOLLOUT; else dcb->active = EPOLLOUT; spin_unlock_irq(&dmabuf->poll.lock); if (events & EPOLLOUT) { /* Paired with fput in dma_buf_poll_cb */ get_file(dmabuf->file); if (!dma_buf_poll_add_cb(resv, true, dcb)) /* No callback queued, wake up any other waiters */ dma_buf_poll_cb(NULL, &dcb->cb); else events &= ~EPOLLOUT; } } if (events & EPOLLIN) { struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in; /* Check that callback isn't busy */ spin_lock_irq(&dmabuf->poll.lock); if (dcb->active) events &= ~EPOLLIN; else dcb->active = EPOLLIN; spin_unlock_irq(&dmabuf->poll.lock); if (events & EPOLLIN) { /* Paired with fput in dma_buf_poll_cb */ get_file(dmabuf->file); if (!dma_buf_poll_add_cb(resv, false, dcb)) /* No callback queued, wake up any other waiters */ dma_buf_poll_cb(NULL, &dcb->cb); else events &= ~EPOLLIN; } } dma_resv_unlock(resv); return events; } /** * dma_buf_set_name - Set a name to a specific dma_buf to track the usage. * It could support changing the name of the dma-buf if the same * piece of memory is used for multiple purpose between different devices. * * @dmabuf: [in] dmabuf buffer that will be renamed. * @buf: [in] A piece of userspace memory that contains the name of * the dma-buf. * * Returns 0 on success. If the dma-buf buffer is already attached to * devices, return -EBUSY. 
* */ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) { char *name = strndup_user(buf, DMA_BUF_NAME_LEN); if (IS_ERR(name)) return PTR_ERR(name); spin_lock(&dmabuf->name_lock); kfree(dmabuf->name); dmabuf->name = name; spin_unlock(&dmabuf->name_lock); return 0; } #if IS_ENABLED(CONFIG_SYNC_FILE) static long dma_buf_export_sync_file(struct dma_buf *dmabuf, void __user *user_data) { struct dma_buf_export_sync_file arg; enum dma_resv_usage usage; struct dma_fence *fence = NULL; struct sync_file *sync_file; int fd, ret; if (copy_from_user(&arg, user_data, sizeof(arg))) return -EFAULT; if (arg.flags & ~DMA_BUF_SYNC_RW) return -EINVAL; if ((arg.flags & DMA_BUF_SYNC_RW) == 0) return -EINVAL; fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) return fd; usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE); ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence); if (ret) goto err_put_fd; if (!fence) fence = dma_fence_get_stub(); sync_file = sync_file_create(fence); dma_fence_put(fence); if (!sync_file) { ret = -ENOMEM; goto err_put_fd; } arg.fd = fd; if (copy_to_user(user_data, &arg, sizeof(arg))) { ret = -EFAULT; goto err_put_file; } fd_install(fd, sync_file->file); return 0; err_put_file: fput(sync_file->file); err_put_fd: put_unused_fd(fd); return ret; } static long dma_buf_import_sync_file(struct dma_buf *dmabuf, const void __user *user_data) { struct dma_buf_import_sync_file arg; struct dma_fence *fence, *f; enum dma_resv_usage usage; struct dma_fence_unwrap iter; unsigned int num_fences; int ret = 0; if (copy_from_user(&arg, user_data, sizeof(arg))) return -EFAULT; if (arg.flags & ~DMA_BUF_SYNC_RW) return -EINVAL; if ((arg.flags & DMA_BUF_SYNC_RW) == 0) return -EINVAL; fence = sync_file_get_fence(arg.fd); if (!fence) return -EINVAL; usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? 
DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ; num_fences = 0; dma_fence_unwrap_for_each(f, &iter, fence) ++num_fences; if (num_fences > 0) { dma_resv_lock(dmabuf->resv, NULL); ret = dma_resv_reserve_fences(dmabuf->resv, num_fences); if (!ret) { dma_fence_unwrap_for_each(f, &iter, fence) dma_resv_add_fence(dmabuf->resv, f, usage); } dma_resv_unlock(dmabuf->resv); } dma_fence_put(fence); return ret; } #endif static long dma_buf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct dma_buf *dmabuf; struct dma_buf_sync sync; enum dma_data_direction direction; int ret; dmabuf = file->private_data; switch (cmd) { case DMA_BUF_IOCTL_SYNC: if (copy_from_user(&sync, (void __user *) arg, sizeof(sync))) return -EFAULT; if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) return -EINVAL; switch (sync.flags & DMA_BUF_SYNC_RW) { case DMA_BUF_SYNC_READ: direction = DMA_FROM_DEVICE; break; case DMA_BUF_SYNC_WRITE: direction = DMA_TO_DEVICE; break; case DMA_BUF_SYNC_RW: direction = DMA_BIDIRECTIONAL; break; default: return -EINVAL; } if (sync.flags & DMA_BUF_SYNC_END) ret = dma_buf_end_cpu_access(dmabuf, direction); else ret = dma_buf_begin_cpu_access(dmabuf, direction); return ret; case DMA_BUF_SET_NAME_A: case DMA_BUF_SET_NAME_B: return dma_buf_set_name(dmabuf, (const char __user *)arg); #if IS_ENABLED(CONFIG_SYNC_FILE) case DMA_BUF_IOCTL_EXPORT_SYNC_FILE: return dma_buf_export_sync_file(dmabuf, (void __user *)arg); case DMA_BUF_IOCTL_IMPORT_SYNC_FILE: return dma_buf_import_sync_file(dmabuf, (const void __user *)arg); #endif default: return -ENOTTY; } } static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) { struct dma_buf *dmabuf = file->private_data; seq_printf(m, "size:\t%zu\n", dmabuf->size); /* Don't count the temporary reference taken inside procfs seq_show */ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); spin_lock(&dmabuf->name_lock); if (dmabuf->name) seq_printf(m, "name:\t%s\n", dmabuf->name); spin_unlock(&dmabuf->name_lock); } static const struct file_operations dma_buf_fops = { .release = dma_buf_file_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, .unlocked_ioctl = dma_buf_ioctl, .compat_ioctl = compat_ptr_ioctl, .show_fdinfo = dma_buf_show_fdinfo, }; /* * is_dma_buf_file - Check if struct file* is associated with dma_buf */ static inline int is_dma_buf_file(struct file *file) { return file->f_op == &dma_buf_fops; } static struct file *dma_buf_getfile(size_t size, int flags) { static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); struct file *file; if (IS_ERR(inode)) return ERR_CAST(inode); inode->i_size = size; inode_set_bytes(inode, size); /* * The ->i_ino acquired from get_next_ino() is not unique thus * not suitable for using it as dentry name by dmabuf stats. * Override ->i_ino with the unique and dmabuffs specific * value. */ inode->i_ino = atomic64_add_return(1, &dmabuf_inode); flags &= O_ACCMODE | O_NONBLOCK; file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", flags, &dma_buf_fops); if (IS_ERR(file)) goto err_alloc_file; return file; err_alloc_file: iput(inode); return file; } /** * DOC: dma buf device access * * For device DMA access to a shared DMA buffer the usual sequence of operations * is fairly simple: * * 1. The exporter defines his exporter instance using * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private * buffer object into a &dma_buf. 
It then exports that &dma_buf to userspace * as a file descriptor by calling dma_buf_fd(). * * 2. Userspace passes this file-descriptors to all drivers it wants this buffer * to share with: First the file descriptor is converted to a &dma_buf using * dma_buf_get(). Then the buffer is attached to the device using * dma_buf_attach(). * * Up to this stage the exporter is still free to migrate or reallocate the * backing storage. * * 3. Once the buffer is attached to all devices userspace can initiate DMA * access to the shared buffer. In the kernel this is done by calling * dma_buf_map_attachment() and dma_buf_unmap_attachment(). * * 4. Once a driver is done with a shared buffer it needs to call * dma_buf_detach() (after cleaning up any mappings) and then release the * reference acquired with dma_buf_get() by calling dma_buf_put(). * * For the detailed semantics exporters are expected to implement see * &dma_buf_ops. */ /** * dma_buf_export - Creates a new dma_buf, and associates an anon file * with this buffer, so it can be exported. * Also connect the allocator specific data and ops to the buffer. * Additionally, provide a name string for exporter; useful in debugging. * * @exp_info: [in] holds all the export related information provided * by the exporter. see &struct dma_buf_export_info * for further details. * * Returns, on success, a newly created struct dma_buf object, which wraps the * supplied private data and operations for struct dma_buf_ops. On either * missing ops, or error in allocating struct dma_buf, will return negative * error. * * For most cases the easiest way to create @exp_info is through the * %DEFINE_DMA_BUF_EXPORT_INFO macro. */ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) { struct dma_buf *dmabuf; struct dma_resv *resv = exp_info->resv; struct file *file; size_t alloc_size = sizeof(struct dma_buf); int ret; if (WARN_ON(!exp_info->priv || !exp_info->ops || !exp_info->ops->map_dma_buf || !exp_info->ops->unmap_dma_buf || !exp_info->ops->release)) return ERR_PTR(-EINVAL); if (WARN_ON(exp_info->ops->cache_sgt_mapping && (exp_info->ops->pin || exp_info->ops->unpin))) return ERR_PTR(-EINVAL); if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) return ERR_PTR(-EINVAL); if (!try_module_get(exp_info->owner)) return ERR_PTR(-ENOENT); file = dma_buf_getfile(exp_info->size, exp_info->flags); if (IS_ERR(file)) { ret = PTR_ERR(file); goto err_module; } if (!exp_info->resv) alloc_size += sizeof(struct dma_resv); else /* prevent &dma_buf[1] == dma_buf->resv */ alloc_size += 1; dmabuf = kzalloc(alloc_size, GFP_KERNEL); if (!dmabuf) { ret = -ENOMEM; goto err_file; } dmabuf->priv = exp_info->priv; dmabuf->ops = exp_info->ops; dmabuf->size = exp_info->size; dmabuf->exp_name = exp_info->exp_name; dmabuf->owner = exp_info->owner; spin_lock_init(&dmabuf->name_lock); init_waitqueue_head(&dmabuf->poll); dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll; dmabuf->cb_in.active = dmabuf->cb_out.active = 0; INIT_LIST_HEAD(&dmabuf->attachments); if (!resv) { dmabuf->resv = (struct dma_resv *)&dmabuf[1]; dma_resv_init(dmabuf->resv); } else { dmabuf->resv = resv; } ret = dma_buf_stats_setup(dmabuf, file); if (ret) goto err_dmabuf; file->private_data = dmabuf; file->f_path.dentry->d_fsdata = dmabuf; dmabuf->file = file; __dma_buf_debugfs_list_add(dmabuf); return dmabuf; err_dmabuf: if (!resv) dma_resv_fini(dmabuf->resv); kfree(dmabuf); err_file: fput(file); err_module: module_put(exp_info->owner); return ERR_PTR(ret); } EXPORT_SYMBOL_NS_GPL(dma_buf_export, 
DMA_BUF); /** * dma_buf_fd - returns a file descriptor for the given struct dma_buf * @dmabuf: [in] pointer to dma_buf for which fd is required. * @flags: [in] flags to give to fd * * On success, returns an associated 'fd'. Else, returns error. */ int dma_buf_fd(struct dma_buf *dmabuf, int flags) { int fd; if (!dmabuf || !dmabuf->file) return -EINVAL; fd = get_unused_fd_flags(flags); if (fd < 0) return fd; fd_install(fd, dmabuf->file); return fd; } EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF); /** * dma_buf_get - returns the struct dma_buf related to an fd * @fd: [in] fd associated with the struct dma_buf to be returned * * On success, returns the struct dma_buf associated with an fd; uses * file's refcounting done by fget to increase refcount. returns ERR_PTR * otherwise. */ struct dma_buf *dma_buf_get(int fd) { struct file *file; file = fget(fd); if (!file) return ERR_PTR(-EBADF); if (!is_dma_buf_file(file)) { fput(file); return ERR_PTR(-EINVAL); } return file->private_data; } EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF); /** * dma_buf_put - decreases refcount of the buffer * @dmabuf: [in] buffer to reduce refcount of * * Uses file's refcounting done implicitly by fput(). * * If, as a result of this call, the refcount becomes 0, the 'release' file * operation related to this fd is called. It calls &dma_buf_ops.release vfunc * in turn, and frees the memory allocated for dmabuf when exported. */ void dma_buf_put(struct dma_buf *dmabuf) { if (WARN_ON(!dmabuf || !dmabuf->file)) return; fput(dmabuf->file); } EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF); static void mangle_sg_table(struct sg_table *sg_table) { #ifdef CONFIG_DMABUF_DEBUG int i; struct scatterlist *sg; /* To catch abuse of the underlying struct page by importers mix * up the bits, but take care to preserve the low SG_ bits to * not corrupt the sgt. The mixing is undone in __unmap_dma_buf * before passing the sgt back to the exporter. */ for_each_sgtable_sg(sg_table, sg, i) sg->page_link ^= ~0xffUL; #endif } static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; signed long ret; sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); if (IS_ERR_OR_NULL(sg_table)) return sg_table; if (!dma_buf_attachment_is_dynamic(attach)) { ret = dma_resv_wait_timeout(attach->dmabuf->resv, DMA_RESV_USAGE_KERNEL, true, MAX_SCHEDULE_TIMEOUT); if (ret < 0) { attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); return ERR_PTR(ret); } } mangle_sg_table(sg_table); return sg_table; } /** * DOC: locking convention * * In order to avoid deadlock situations between dma-buf exports and importers, * all dma-buf API users must follow the common dma-buf locking convention. * * Convention for importers * * 1. Importers must hold the dma-buf reservation lock when calling these * functions: * * - dma_buf_pin() * - dma_buf_unpin() * - dma_buf_map_attachment() * - dma_buf_unmap_attachment() * - dma_buf_vmap() * - dma_buf_vunmap() * * 2. Importers must not hold the dma-buf reservation lock when calling these * functions: * * - dma_buf_attach() * - dma_buf_dynamic_attach() * - dma_buf_detach() * - dma_buf_export() * - dma_buf_fd() * - dma_buf_get() * - dma_buf_put() * - dma_buf_mmap() * - dma_buf_begin_cpu_access() * - dma_buf_end_cpu_access() * - dma_buf_map_attachment_unlocked() * - dma_buf_unmap_attachment_unlocked() * - dma_buf_vmap_unlocked() * - dma_buf_vunmap_unlocked() * * Convention for exporters * * 1. 
These &dma_buf_ops callbacks are invoked with unlocked dma-buf * reservation and exporter can take the lock: * * - &dma_buf_ops.attach() * - &dma_buf_ops.detach() * - &dma_buf_ops.release() * - &dma_buf_ops.begin_cpu_access() * - &dma_buf_ops.end_cpu_access() * - &dma_buf_ops.mmap() * * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf * reservation and exporter can't take the lock: * * - &dma_buf_ops.pin() * - &dma_buf_ops.unpin() * - &dma_buf_ops.map_dma_buf() * - &dma_buf_ops.unmap_dma_buf() * - &dma_buf_ops.vmap() * - &dma_buf_ops.vunmap() * * 3. Exporters must hold the dma-buf reservation lock when calling these * functions: * * - dma_buf_move_notify() */ /** * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list * @dmabuf: [in] buffer to attach device to. * @dev: [in] device to be attached. * @importer_ops: [in] importer operations for the attachment * @importer_priv: [in] importer private pointer for the attachment * * Returns struct dma_buf_attachment pointer for this attachment. Attachments * must be cleaned up by calling dma_buf_detach(). * * Optionally this calls &dma_buf_ops.attach to allow device-specific attach * functionality. * * Returns: * * A pointer to newly created &dma_buf_attachment on success, or a negative * error code wrapped into a pointer on failure. * * Note that this can fail if the backing storage of @dmabuf is in a place not * accessible to @dev, and cannot be moved to a more suitable place. This is * indicated with the error code -EBUSY. */ struct dma_buf_attachment * dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, const struct dma_buf_attach_ops *importer_ops, void *importer_priv) { struct dma_buf_attachment *attach; int ret; if (WARN_ON(!dmabuf || !dev)) return ERR_PTR(-EINVAL); if (WARN_ON(importer_ops && !importer_ops->move_notify)) return ERR_PTR(-EINVAL); attach = kzalloc(sizeof(*attach), GFP_KERNEL); if (!attach) return ERR_PTR(-ENOMEM); attach->dev = dev; attach->dmabuf = dmabuf; if (importer_ops) attach->peer2peer = importer_ops->allow_peer2peer; attach->importer_ops = importer_ops; attach->importer_priv = importer_priv; if (dmabuf->ops->attach) { ret = dmabuf->ops->attach(dmabuf, attach); if (ret) goto err_attach; } dma_resv_lock(dmabuf->resv, NULL); list_add(&attach->node, &dmabuf->attachments); dma_resv_unlock(dmabuf->resv); /* When either the importer or the exporter can't handle dynamic * mappings we cache the mapping here to avoid issues with the * reservation object lock. */ if (dma_buf_attachment_is_dynamic(attach) != dma_buf_is_dynamic(dmabuf)) { struct sg_table *sgt; dma_resv_lock(attach->dmabuf->resv, NULL); if (dma_buf_is_dynamic(attach->dmabuf)) { ret = dmabuf->ops->pin(attach); if (ret) goto err_unlock; } sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL); if (!sgt) sgt = ERR_PTR(-ENOMEM); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); goto err_unpin; } dma_resv_unlock(attach->dmabuf->resv); attach->sgt = sgt; attach->dir = DMA_BIDIRECTIONAL; } return attach; err_attach: kfree(attach); return ERR_PTR(ret); err_unpin: if (dma_buf_is_dynamic(attach->dmabuf)) dmabuf->ops->unpin(attach); err_unlock: dma_resv_unlock(attach->dmabuf->resv); dma_buf_detach(dmabuf, attach); return ERR_PTR(ret); } EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF); /** * dma_buf_attach - Wrapper for dma_buf_dynamic_attach * @dmabuf: [in] buffer to attach device to. * @dev: [in] device to be attached. * * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static * mapping. 
*/ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev) { return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL); } EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF); static void __unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sg_table, enum dma_data_direction direction) { /* uses XOR, hence this unmangles */ mangle_sg_table(sg_table); attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); } /** * dma_buf_detach - Remove the given attachment from dmabuf's attachments list * @dmabuf: [in] buffer to detach from. * @attach: [in] attachment to be detached; is free'd after this call. * * Clean up a device attachment obtained by calling dma_buf_attach(). * * Optionally this calls &dma_buf_ops.detach for device-specific detach. */ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf)) return; dma_resv_lock(dmabuf->resv, NULL); if (attach->sgt) { __unmap_dma_buf(attach, attach->sgt, attach->dir); if (dma_buf_is_dynamic(attach->dmabuf)) dmabuf->ops->unpin(attach); } list_del(&attach->node); dma_resv_unlock(dmabuf->resv); if (dmabuf->ops->detach) dmabuf->ops->detach(dmabuf, attach); kfree(attach); } EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF); /** * dma_buf_pin - Lock down the DMA-buf * @attach: [in] attachment which should be pinned * * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may * call this, and only for limited use cases like scanout and not for temporary * pin operations. It is not permitted to allow userspace to pin arbitrary * amounts of buffers through this interface. * * Buffers must be unpinned by calling dma_buf_unpin(). * * Returns: * 0 on success, negative error code on failure. */ int dma_buf_pin(struct dma_buf_attachment *attach) { struct dma_buf *dmabuf = attach->dmabuf; int ret = 0; WARN_ON(!dma_buf_attachment_is_dynamic(attach)); dma_resv_assert_held(dmabuf->resv); if (dmabuf->ops->pin) ret = dmabuf->ops->pin(attach); return ret; } EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF); /** * dma_buf_unpin - Unpin a DMA-buf * @attach: [in] attachment which should be unpinned * * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move * any mapping of @attach again and inform the importer through * &dma_buf_attach_ops.move_notify. */ void dma_buf_unpin(struct dma_buf_attachment *attach) { struct dma_buf *dmabuf = attach->dmabuf; WARN_ON(!dma_buf_attachment_is_dynamic(attach)); dma_resv_assert_held(dmabuf->resv); if (dmabuf->ops->unpin) dmabuf->ops->unpin(attach); } EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF); /** * dma_buf_map_attachment - Returns the scatterlist table of the attachment; * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the * dma_buf_ops. * @attach: [in] attachment whose scatterlist is to be returned * @direction: [in] direction of DMA transfer * * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR * on error. May return -EINTR if it is interrupted by a signal. * * On success, the DMA addresses and lengths in the returned scatterlist are * PAGE_SIZE aligned. * * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that * the underlying backing storage is pinned for as long as a mapping exists, * therefore users/importers should not hold onto a mapping for undue amounts of * time. * * Important: Dynamic importers must wait for the exclusive fence of the struct * dma_resv attached to the DMA-BUF first. 
*/ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; int r; might_sleep(); if (WARN_ON(!attach || !attach->dmabuf)) return ERR_PTR(-EINVAL); dma_resv_assert_held(attach->dmabuf->resv); if (attach->sgt) { /* * Two mappings with different directions for the same * attachment are not allowed. */ if (attach->dir != direction && attach->dir != DMA_BIDIRECTIONAL) return ERR_PTR(-EBUSY); return attach->sgt; } if (dma_buf_is_dynamic(attach->dmabuf)) { if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { r = attach->dmabuf->ops->pin(attach); if (r) return ERR_PTR(r); } } sg_table = __map_dma_buf(attach, direction); if (!sg_table) sg_table = ERR_PTR(-ENOMEM); if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) attach->dmabuf->ops->unpin(attach); if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) { attach->sgt = sg_table; attach->dir = direction; } #ifdef CONFIG_DMA_API_DEBUG if (!IS_ERR(sg_table)) { struct scatterlist *sg; u64 addr; int len; int i; for_each_sgtable_dma_sg(sg_table, sg, i) { addr = sg_dma_address(sg); len = sg_dma_len(sg); if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) { pr_debug("%s: addr %llx or len %x is not page aligned!\n", __func__, addr, len); } } } #endif /* CONFIG_DMA_API_DEBUG */ return sg_table; } EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF); /** * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment; * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the * dma_buf_ops. * @attach: [in] attachment whose scatterlist is to be returned * @direction: [in] direction of DMA transfer * * Unlocked variant of dma_buf_map_attachment(). */ struct sg_table * dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; might_sleep(); if (WARN_ON(!attach || !attach->dmabuf)) return ERR_PTR(-EINVAL); dma_resv_lock(attach->dmabuf->resv, NULL); sg_table = dma_buf_map_attachment(attach, direction); dma_resv_unlock(attach->dmabuf->resv); return sg_table; } EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF); /** * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of * dma_buf_ops. * @attach: [in] attachment to unmap buffer from * @sg_table: [in] scatterlist info of the buffer to unmap * @direction: [in] direction of DMA transfer * * This unmaps a DMA mapping for @attached obtained by dma_buf_map_attachment(). */ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, struct sg_table *sg_table, enum dma_data_direction direction) { might_sleep(); if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) return; dma_resv_assert_held(attach->dmabuf->resv); if (attach->sgt == sg_table) return; __unmap_dma_buf(attach, sg_table, direction); if (dma_buf_is_dynamic(attach->dmabuf) && !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) dma_buf_unpin(attach); } EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF); /** * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;might * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of * dma_buf_ops. * @attach: [in] attachment to unmap buffer from * @sg_table: [in] scatterlist info of the buffer to unmap * @direction: [in] direction of DMA transfer * * Unlocked variant of dma_buf_unmap_attachment(). 
*/ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, struct sg_table *sg_table, enum dma_data_direction direction) { might_sleep(); if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) return; dma_resv_lock(attach->dmabuf->resv, NULL); dma_buf_unmap_attachment(attach, sg_table, direction); dma_resv_unlock(attach->dmabuf->resv); } EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF); /** * dma_buf_move_notify - notify attachments that DMA-buf is moving * * @dmabuf: [in] buffer which is moving * * Informs all attachments that they need to destroy and recreate all their * mappings. */ void dma_buf_move_notify(struct dma_buf *dmabuf) { struct dma_buf_attachment *attach; dma_resv_assert_held(dmabuf->resv); list_for_each_entry(attach, &dmabuf->attachments, node) if (attach->importer_ops) attach->importer_ops->move_notify(attach); } EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); /** * DOC: cpu access * * There are multiple reasons for supporting CPU access to a dma buffer object: * * - Fallback operations in the kernel, for example when a device is connected * over USB and the kernel needs to shuffle the data around first before * sending it away. Cache coherency is handled by bracketing any transactions * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() * access. * * Since for most kernel internal dma-buf accesses need the entire buffer, a * vmap interface is introduced. Note that on very old 32-bit architectures * vmalloc space might be limited and result in vmap calls failing. * * Interfaces:: * * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map) * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map) * * The vmap call can fail if there is no vmap support in the exporter, or if * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference * count for all vmap access and calls down into the exporter's vmap function * only when no vmapping exists, and only unmaps it once. Protection against * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex. * * - For full compatibility on the importer side with existing userspace * interfaces, which might already support mmap'ing buffers. This is needed in * many processing pipelines (e.g. feeding a software rendered image into a * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION * framework already supported this and for DMA buffer file descriptors to * replace ION buffers mmap support was needed. * * There is no special interfaces, userspace simply calls mmap on the dma-buf * fd. But like for CPU access there's a need to bracket the actual access, * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must * be restarted. * * Some systems might need some sort of cache coherency management e.g. when * CPU and GPU domains are being accessed through dma-buf at the same time. * To circumvent this problem there are begin/end coherency markers, that * forward directly to existing dma-buf device drivers vfunc hooks. Userspace * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The * sequence would be used like following: * * - mmap dma-buf fd * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write * to mmap area 3. SYNC_END ioctl. 
This can be repeated as often as you * want (with the new data being consumed by say the GPU or the scanout * device) * - munmap once you don't need the buffer any more * * For correctness and optimal performance, it is always required to use * SYNC_START and SYNC_END before and after, respectively, when accessing the * mapped address. Userspace cannot rely on coherent access, even when there * are systems where it just works without calling these ioctls. * * - And as a CPU fallback in userspace processing pipelines. * * Similar to the motivation for kernel cpu access it is again important that * the userspace code of a given importing subsystem can use the same * interfaces with a imported dma-buf buffer object as with a native buffer * object. This is especially important for drm where the userspace part of * contemporary OpenGL, X, and other drivers is huge, and reworking them to * use a different way to mmap a buffer rather invasive. * * The assumption in the current dma-buf interfaces is that redirecting the * initial mmap is all that's needed. A survey of some of the existing * subsystems shows that no driver seems to do any nefarious thing like * syncing up with outstanding asynchronous processing on the device or * allocating special resources at fault time. So hopefully this is good * enough, since adding interfaces to intercept pagefaults and allow pte * shootdowns would increase the complexity quite a bit. * * Interface:: * * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*, * unsigned long); * * If the importing subsystem simply provides a special-purpose mmap call to * set up a mapping in userspace, calling do_mmap with &dma_buf.file will * equally achieve that for a dma-buf object. */ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); struct dma_resv *resv = dmabuf->resv; long ret; /* Wait on any implicit rendering fences */ ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write), true, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; return 0; } /** * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific * preparations. Coherency is only guaranteed in the specified range for the * specified access direction. * @dmabuf: [in] buffer to prepare cpu access for. * @direction: [in] direction of access. * * After the cpu access is complete the caller should call * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is * it guaranteed to be coherent with other DMA access. * * This function will also wait for any DMA transactions tracked through * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit * synchronization this function will only ensure cache coherency, callers must * ensure synchronization with such DMA transactions on their own. * * Can return negative error values, returns 0 on success. */ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { int ret = 0; if (WARN_ON(!dmabuf)) return -EINVAL; might_lock(&dmabuf->resv->lock.base); if (dmabuf->ops->begin_cpu_access) ret = dmabuf->ops->begin_cpu_access(dmabuf, direction); /* Ensure that all fences are waited upon - but we first allow * the native handler the chance to do so more efficiently if it * chooses. A double invocation here will be reasonably cheap no-op. 
*/ if (ret == 0) ret = __dma_buf_begin_cpu_access(dmabuf, direction); return ret; } EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF); /** * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific * actions. Coherency is only guaranteed in the specified range for the * specified access direction. * @dmabuf: [in] buffer to complete cpu access for. * @direction: [in] direction of access. * * This terminates CPU access started with dma_buf_begin_cpu_access(). * * Can return negative error values, returns 0 on success. */ int dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { int ret = 0; WARN_ON(!dmabuf); might_lock(&dmabuf->resv->lock.base); if (dmabuf->ops->end_cpu_access) ret = dmabuf->ops->end_cpu_access(dmabuf, direction); return ret; } EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF); /** * dma_buf_mmap - Setup up a userspace mmap with the given vma * @dmabuf: [in] buffer that should back the vma * @vma: [in] vma for the mmap * @pgoff: [in] offset in pages where this mmap should start within the * dma-buf buffer. * * This function adjusts the passed in vma so that it points at the file of the * dma_buf operation. It also adjusts the starting pgoff and does bounds * checking on the size of the vma. Then it calls the exporters mmap function to * set up the mapping. * * Can return negative error values, returns 0 on success. */ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, unsigned long pgoff) { if (WARN_ON(!dmabuf || !vma)) return -EINVAL; /* check if buffer supports mmap */ if (!dmabuf->ops->mmap) return -EINVAL; /* check for offset overflow */ if (pgoff + vma_pages(vma) < pgoff) return -EOVERFLOW; /* check for overflowing the buffer's size */ if (pgoff + vma_pages(vma) > dmabuf->size >> PAGE_SHIFT) return -EINVAL; /* readjust the vma */ vma_set_file(vma, dmabuf->file); vma->vm_pgoff = pgoff; return dmabuf->ops->mmap(dmabuf, vma); } EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); /** * dma_buf_vmap - Create virtual mapping for the buffer object into kernel * address space. Same restrictions as for vmap and friends apply. * @dmabuf: [in] buffer to vmap * @map: [out] returns the vmap pointer * * This call may fail due to lack of virtual mapping address space. * These calls are optional in drivers. The intended use for them * is for mapping objects linear in kernel space for high use objects. * * To ensure coherency users must call dma_buf_begin_cpu_access() and * dma_buf_end_cpu_access() around any cpu access performed through this * mapping. * * Returns 0 on success, or a negative errno code otherwise. */ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct iosys_map ptr; int ret; iosys_map_clear(map); if (WARN_ON(!dmabuf)) return -EINVAL; dma_resv_assert_held(dmabuf->resv); if (!dmabuf->ops->vmap) return -EINVAL; if (dmabuf->vmapping_counter) { dmabuf->vmapping_counter++; BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); *map = dmabuf->vmap_ptr; return 0; } BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr)); ret = dmabuf->ops->vmap(dmabuf, &ptr); if (WARN_ON_ONCE(ret)) return ret; dmabuf->vmap_ptr = ptr; dmabuf->vmapping_counter = 1; *map = dmabuf->vmap_ptr; return 0; } EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF); /** * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel * address space. Same restrictions as for vmap and friends apply. 
* @dmabuf: [in] buffer to vmap * @map: [out] returns the vmap pointer * * Unlocked version of dma_buf_vmap() * * Returns 0 on success, or a negative errno code otherwise. */ int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) { int ret; iosys_map_clear(map); if (WARN_ON(!dmabuf)) return -EINVAL; dma_resv_lock(dmabuf->resv, NULL); ret = dma_buf_vmap(dmabuf, map); dma_resv_unlock(dmabuf->resv); return ret; } EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF); /** * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. * @dmabuf: [in] buffer to vunmap * @map: [in] vmap pointer to vunmap */ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { if (WARN_ON(!dmabuf)) return; dma_resv_assert_held(dmabuf->resv); BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); BUG_ON(dmabuf->vmapping_counter == 0); BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map)); if (--dmabuf->vmapping_counter == 0) { if (dmabuf->ops->vunmap) dmabuf->ops->vunmap(dmabuf, map); iosys_map_clear(&dmabuf->vmap_ptr); } } EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF); /** * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap. * @dmabuf: [in] buffer to vunmap * @map: [in] vmap pointer to vunmap */ void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) { if (WARN_ON(!dmabuf)) return; dma_resv_lock(dmabuf->resv, NULL); dma_buf_vunmap(dmabuf, map); dma_resv_unlock(dmabuf->resv); } EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF); #ifdef CONFIG_DEBUG_FS static int dma_buf_debug_show(struct seq_file *s, void *unused) { struct dma_buf *buf_obj; struct dma_buf_attachment *attach_obj; int count = 0, attach_count; size_t size = 0; int ret; ret = mutex_lock_interruptible(&debugfs_list_mutex); if (ret) return ret; seq_puts(s, "\nDma-buf Objects:\n"); seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", "size", "flags", "mode", "count", "ino"); list_for_each_entry(buf_obj, &debugfs_list, list_node) { ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); if (ret) goto error_unlock; spin_lock(&buf_obj->name_lock); seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", buf_obj->size, buf_obj->file->f_flags, buf_obj->file->f_mode, file_count(buf_obj->file), buf_obj->exp_name, file_inode(buf_obj->file)->i_ino, buf_obj->name ?: "<none>"); spin_unlock(&buf_obj->name_lock); dma_resv_describe(buf_obj->resv, s); seq_puts(s, "\tAttached Devices:\n"); attach_count = 0; list_for_each_entry(attach_obj, &buf_obj->attachments, node) { seq_printf(s, "\t%s\n", dev_name(attach_obj->dev)); attach_count++; } dma_resv_unlock(buf_obj->resv); seq_printf(s, "Total %d devices attached\n\n", attach_count); count++; size += buf_obj->size; } seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); mutex_unlock(&debugfs_list_mutex); return 0; error_unlock: mutex_unlock(&debugfs_list_mutex); return ret; } DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); static struct dentry *dma_buf_debugfs_dir; static int dma_buf_init_debugfs(void) { struct dentry *d; int err = 0; d = debugfs_create_dir("dma_buf", NULL); if (IS_ERR(d)) return PTR_ERR(d); dma_buf_debugfs_dir = d; d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir, NULL, &dma_buf_debug_fops); if (IS_ERR(d)) { pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); debugfs_remove_recursive(dma_buf_debugfs_dir); dma_buf_debugfs_dir = NULL; err = PTR_ERR(d); } return err; } static void dma_buf_uninit_debugfs(void) { debugfs_remove_recursive(dma_buf_debugfs_dir); } #else static inline int dma_buf_init_debugfs(void) { return 
0; } static inline void dma_buf_uninit_debugfs(void) { } #endif static int __init dma_buf_init(void) { int ret; ret = dma_buf_init_sysfs_statistics(); if (ret) return ret; dma_buf_mnt = kern_mount(&dma_buf_fs_type); if (IS_ERR(dma_buf_mnt)) return PTR_ERR(dma_buf_mnt); dma_buf_init_debugfs(); return 0; } subsys_initcall(dma_buf_init); static void __exit dma_buf_deinit(void) { dma_buf_uninit_debugfs(); kern_unmount(dma_buf_mnt); dma_buf_uninit_sysfs_statistics(); } __exitcall(dma_buf_deinit);
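/*
 * Illustrative sketch, not part of dma-buf.c above: the importer-side
 * sequence described in the "DOC: dma buf device access" section, using only
 * functions exported by this file. The function name, the device pointer and
 * the fd are hypothetical placeholders a real importing driver would supply;
 * error handling is reduced to the minimum. Assumes <linux/dma-buf.h> and
 * <linux/dma-direction.h> are included.
 */
static int example_import_and_map(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);	/* static (non-dynamic) attachment */
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto out_put;
	}

	/* the _unlocked variant takes the reservation lock internally */
	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto out_detach;
	}

	/* ... program the device with the DMA addresses/lengths in sgt ... */

	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
out_detach:
	dma_buf_detach(dmabuf, attach);
out_put:
	dma_buf_put(dmabuf);			/* drops the dma_buf_get() reference */
	return ret;
}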
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * i2c support for Silicon Labs' CP2615 Digital Audio Bridge
 *
 * (c) 2021, Bence Csókás <bence98@sch.bme.hu>
 */

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/usb.h>

/** CP2615 I/O Protocol implementation */

#define CP2615_VID 0x10c4
#define CP2615_PID 0xeac1

#define IOP_EP_IN  0x82
#define IOP_EP_OUT 0x02
#define IOP_IFN 1
#define IOP_ALTSETTING 2

#define MAX_IOP_SIZE 64
#define MAX_IOP_PAYLOAD_SIZE (MAX_IOP_SIZE - 6)
#define MAX_I2C_SIZE (MAX_IOP_PAYLOAD_SIZE - 4)

enum cp2615_iop_msg_type {
	iop_GetAccessoryInfo = 0xD100,
	iop_AccessoryInfo = 0xA100,
	iop_GetPortConfiguration = 0xD203,
	iop_PortConfiguration = 0xA203,
	iop_DoI2cTransfer = 0xD400,
	iop_I2cTransferResult = 0xA400,
	iop_GetSerialState = 0xD501,
	iop_SerialState = 0xA501
};

struct __packed cp2615_iop_msg {
	__be16 preamble, length, msg;
	u8 data[MAX_IOP_PAYLOAD_SIZE];
};

#define PART_ID_A01 0x1400
#define PART_ID_A02 0x1500

struct __packed cp2615_iop_accessory_info {
	__be16 part_id, option_id, proto_ver;
};

struct __packed cp2615_i2c_transfer {
	u8 tag, i2caddr, read_len, write_len;
	u8 data[MAX_I2C_SIZE];
};

/* Possible values for struct cp2615_i2c_transfer_result.status */
enum cp2615_i2c_status {
	/* Writing to the internal EEPROM failed, because it is locked */
	CP2615_CFG_LOCKED = -6,
	/* read_len or write_len out of range */
	CP2615_INVALID_PARAM = -4,
	/* I2C target did not ACK in time */
	CP2615_TIMEOUT,
	/* I2C bus busy */
	CP2615_BUS_BUSY,
	/* I2C bus error (ie.
target NAK'd the request) */ CP2615_BUS_ERROR, CP2615_SUCCESS }; struct __packed cp2615_i2c_transfer_result { u8 tag, i2caddr; s8 status; u8 read_len; u8 data[MAX_I2C_SIZE]; }; static int cp2615_init_iop_msg(struct cp2615_iop_msg *ret, enum cp2615_iop_msg_type msg, const void *data, size_t data_len) { if (data_len > MAX_IOP_PAYLOAD_SIZE) return -EFBIG; if (!ret) return -EINVAL; ret->preamble = htons(0x2A2AU); ret->length = htons(data_len + 6); ret->msg = htons(msg); if (data && data_len) memcpy(&ret->data, data, data_len); return 0; } static int cp2615_init_i2c_msg(struct cp2615_iop_msg *ret, const struct cp2615_i2c_transfer *data) { return cp2615_init_iop_msg(ret, iop_DoI2cTransfer, data, 4 + data->write_len); } /* Translates status codes to Linux errno's */ static int cp2615_check_status(enum cp2615_i2c_status status) { switch (status) { case CP2615_SUCCESS: return 0; case CP2615_BUS_ERROR: return -ENXIO; case CP2615_BUS_BUSY: return -EAGAIN; case CP2615_TIMEOUT: return -ETIMEDOUT; case CP2615_INVALID_PARAM: return -EINVAL; case CP2615_CFG_LOCKED: return -EPERM; } /* Unknown error code */ return -EPROTO; } /** Driver code */ static int cp2615_i2c_send(struct usb_interface *usbif, struct cp2615_i2c_transfer *i2c_w) { struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); struct usb_device *usbdev = interface_to_usbdev(usbif); int res = cp2615_init_i2c_msg(msg, i2c_w); if (!res) res = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, IOP_EP_OUT), msg, ntohs(msg->length), NULL, 0); kfree(msg); return res; } static int cp2615_i2c_recv(struct usb_interface *usbif, unsigned char tag, void *buf) { struct usb_device *usbdev = interface_to_usbdev(usbif); struct cp2615_iop_msg *msg; struct cp2615_i2c_transfer_result *i2c_r; int res; msg = kzalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN), msg, sizeof(struct cp2615_iop_msg), NULL, 0); if (res < 0) { kfree(msg); return res; } i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data; if (msg->msg != htons(iop_I2cTransferResult) || i2c_r->tag != tag) { kfree(msg); return -EIO; } res = cp2615_check_status(i2c_r->status); if (!res) memcpy(buf, &i2c_r->data, i2c_r->read_len); kfree(msg); return res; } /* Checks if the IOP is functional by querying the part's ID */ static int cp2615_check_iop(struct usb_interface *usbif) { struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); struct cp2615_iop_accessory_info *info = (struct cp2615_iop_accessory_info *)&msg->data; struct usb_device *usbdev = interface_to_usbdev(usbif); int res = cp2615_init_iop_msg(msg, iop_GetAccessoryInfo, NULL, 0); if (res) goto out; res = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, IOP_EP_OUT), msg, ntohs(msg->length), NULL, 0); if (res) goto out; res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN), msg, sizeof(struct cp2615_iop_msg), NULL, 0); if (res) goto out; if (msg->msg != htons(iop_AccessoryInfo)) { res = -EIO; goto out; } switch (ntohs(info->part_id)) { case PART_ID_A01: dev_dbg(&usbif->dev, "Found A01 part. 
(WARNING: errata exists!)\n"); break; case PART_ID_A02: dev_dbg(&usbif->dev, "Found good A02 part.\n"); break; default: dev_warn(&usbif->dev, "Unknown part ID %04X\n", ntohs(info->part_id)); } out: kfree(msg); return res; } static int cp2615_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct usb_interface *usbif = adap->algo_data; int i = 0, ret = 0; struct i2c_msg *msg; struct cp2615_i2c_transfer i2c_w = {0}; dev_dbg(&usbif->dev, "Doing %d I2C transactions\n", num); for (; !ret && i < num; i++) { msg = &msgs[i]; i2c_w.tag = 0xdd; i2c_w.i2caddr = i2c_8bit_addr_from_msg(msg); if (msg->flags & I2C_M_RD) { i2c_w.read_len = msg->len; i2c_w.write_len = 0; } else { i2c_w.read_len = 0; i2c_w.write_len = msg->len; memcpy(&i2c_w.data, msg->buf, i2c_w.write_len); } ret = cp2615_i2c_send(usbif, &i2c_w); if (ret) break; ret = cp2615_i2c_recv(usbif, i2c_w.tag, msg->buf); } if (ret < 0) return ret; return i; } static u32 cp2615_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm cp2615_i2c_algo = { .xfer = cp2615_i2c_xfer, .functionality = cp2615_i2c_func, }; /* * This chip has some limitations: one is that the USB endpoint * can only receive 64 bytes/transfer, that leaves 54 bytes for * the I2C transfer. On top of that, EITHER read_len OR write_len * may be zero, but not both. If both are non-zero, the adapter * issues a write followed by a read. And the chip does not * support repeated START between the write and read phases. */ static struct i2c_adapter_quirks cp2615_i2c_quirks = { .max_write_len = MAX_I2C_SIZE, .max_read_len = MAX_I2C_SIZE, .flags = I2C_AQ_COMB_WRITE_THEN_READ | I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START, .max_comb_1st_msg_len = MAX_I2C_SIZE, .max_comb_2nd_msg_len = MAX_I2C_SIZE }; static void cp2615_i2c_remove(struct usb_interface *usbif) { struct i2c_adapter *adap = usb_get_intfdata(usbif); usb_set_intfdata(usbif, NULL); i2c_del_adapter(adap); } static int cp2615_i2c_probe(struct usb_interface *usbif, const struct usb_device_id *id) { int ret = 0; struct i2c_adapter *adap; struct usb_device *usbdev = interface_to_usbdev(usbif); ret = usb_set_interface(usbdev, IOP_IFN, IOP_ALTSETTING); if (ret) return ret; ret = cp2615_check_iop(usbif); if (ret) return ret; adap = devm_kzalloc(&usbif->dev, sizeof(struct i2c_adapter), GFP_KERNEL); if (!adap) return -ENOMEM; strscpy(adap->name, usbdev->serial, sizeof(adap->name)); adap->owner = THIS_MODULE; adap->dev.parent = &usbif->dev; adap->dev.of_node = usbif->dev.of_node; adap->timeout = HZ; adap->algo = &cp2615_i2c_algo; adap->quirks = &cp2615_i2c_quirks; adap->algo_data = usbif; ret = i2c_add_adapter(adap); if (ret) return ret; usb_set_intfdata(usbif, adap); return 0; } static const struct usb_device_id id_table[] = { { USB_DEVICE_INTERFACE_NUMBER(CP2615_VID, CP2615_PID, IOP_IFN) }, { } }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_driver cp2615_i2c_driver = { .name = "i2c-cp2615", .probe = cp2615_i2c_probe, .disconnect = cp2615_i2c_remove, .id_table = id_table, }; module_usb_driver(cp2615_i2c_driver); MODULE_AUTHOR("Bence Csókás <bence98@sch.bme.hu>"); MODULE_DESCRIPTION("CP2615 I2C bus driver"); MODULE_LICENSE("GPL");
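/*
 * Illustrative sketch, not part of the driver above: the one transaction
 * shape the adapter quirks allow - a write followed by a read, each at most
 * MAX_I2C_SIZE bytes, combined without a repeated START. The function name,
 * the 7-bit client address and the register byte are hypothetical; only
 * i2c_transfer() and struct i2c_msg are taken from the i2c core as used by
 * this driver.
 */
static int example_cp2615_read_reg(struct i2c_adapter *adap, u16 addr,
				   u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret;

	/* the i2c core validates msgs against cp2615_i2c_quirks first */
	ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}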
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Tap functions for AF_VSOCK sockets.
 *
 * Code based on net/netlink/af_netlink.c tap functions.
 */

#include <linux/module.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#include <linux/if_arp.h>

static DEFINE_SPINLOCK(vsock_tap_lock);
static struct list_head vsock_tap_all __read_mostly =
				LIST_HEAD_INIT(vsock_tap_all);

int vsock_add_tap(struct vsock_tap *vt)
{
	if (unlikely(vt->dev->type != ARPHRD_VSOCKMON))
		return -EINVAL;

	__module_get(vt->module);

	spin_lock(&vsock_tap_lock);
	list_add_rcu(&vt->list, &vsock_tap_all);
	spin_unlock(&vsock_tap_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vsock_add_tap);

int vsock_remove_tap(struct vsock_tap *vt)
{
	struct vsock_tap *tmp;
	bool found = false;

	spin_lock(&vsock_tap_lock);

	list_for_each_entry(tmp, &vsock_tap_all, list) {
		if (vt == tmp) {
			list_del_rcu(&vt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("vsock_remove_tap: %p not found\n", vt);
out:
	spin_unlock(&vsock_tap_lock);

	synchronize_net();

	if (found)
		module_put(vt->module);

	return found ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(vsock_remove_tap);

static int __vsock_deliver_tap_skb(struct sk_buff *skb,
				   struct net_device *dev)
{
	int ret = 0;
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

	if (nskb) {
		dev_hold(dev);

		nskb->dev = dev;
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);

		dev_put(dev);
	}

	return ret;
}

static void __vsock_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct vsock_tap *tmp;

	list_for_each_entry_rcu(tmp, &vsock_tap_all, list) {
		ret = __vsock_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque)
{
	struct sk_buff *skb;

	rcu_read_lock();

	if (likely(list_empty(&vsock_tap_all)))
		goto out;

	skb = build_skb(opaque);
	if (skb) {
		__vsock_deliver_tap(skb);
		consume_skb(skb);
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(vsock_deliver_tap);
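/*
 * Illustrative sketch, not part of the file above: how a capture driver
 * (such as drivers/net/vsockmon.c) would register itself with the tap
 * machinery. The net_device and function names are hypothetical; the only
 * hard requirement enforced by vsock_add_tap() is that the device type is
 * ARPHRD_VSOCKMON.
 */
static struct vsock_tap example_tap;

static int example_register_tap(struct net_device *vsockmon_dev)
{
	example_tap.dev = vsockmon_dev;    /* must be of type ARPHRD_VSOCKMON */
	example_tap.module = THIS_MODULE;  /* pinned while the tap is registered */
	return vsock_add_tap(&example_tap);
}

static void example_unregister_tap(void)
{
	vsock_remove_tap(&example_tap);    /* drops the module reference */
}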
4 26 22 4 26 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 // SPDX-License-Identifier: GPL-2.0-or-later /* Module signature checker * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/module_signature.h> #include <linux/string.h> #include <linux/verification.h> #include <linux/security.h> #include <crypto/public_key.h> #include <uapi/linux/module.h> #include "internal.h" #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "module." static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); module_param(sig_enforce, bool_enable_only, 0644); /* * Export sig_enforce kernel cmdline parameter to allow other subsystems rely * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. */ bool is_module_sig_enforced(void) { return sig_enforce; } EXPORT_SYMBOL(is_module_sig_enforced); void set_module_sig_enforced(void) { sig_enforce = true; } /* * Verify the signature on a module. */ int mod_verify_sig(const void *mod, struct load_info *info) { struct module_signature ms; size_t sig_len, modlen = info->len; int ret; pr_devel("==>%s(,%zu)\n", __func__, modlen); if (modlen <= sizeof(ms)) return -EBADMSG; memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms)); ret = mod_check_sig(&ms, modlen, "module"); if (ret) return ret; sig_len = be32_to_cpu(ms.sig_len); modlen -= sig_len + sizeof(ms); info->len = modlen; return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len, VERIFY_USE_SECONDARY_KEYRING, VERIFYING_MODULE_SIGNATURE, NULL, NULL); } int module_sig_check(struct load_info *info, int flags) { int err = -ENODATA; const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; const char *reason; const void *mod = info->hdr; bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS | MODULE_INIT_IGNORE_VERMAGIC); /* * Do not allow mangled modules as a module with version information * removed is no longer the module that was signed. */ if (!mangled_module && info->len > markerlen && memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { /* We truncate the module to discard the signature */ info->len -= markerlen; err = mod_verify_sig(mod, info); if (!err) { info->sig_ok = true; return 0; } } /* * We don't permit modules to be loaded into the trusted kernels * without a valid signature on them, but if we're not enforcing, * certain errors are non-fatal. */ switch (err) { case -ENODATA: reason = "unsigned module"; break; case -ENOPKG: reason = "module with unsupported crypto"; break; case -ENOKEY: reason = "module with unavailable key"; break; default: /* * All other errors are fatal, including lack of memory, * unparseable signatures, and signature check failures -- * even if signatures aren't required. */ return err; } if (is_module_sig_enforced()) { pr_notice("Loading of %s is rejected\n", reason); return -EKEYREJECTED; } return security_locked_down(LOCKDOWN_MODULE_SIGNATURE); }
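The verification above relies on the appended-signature layout: the module image is followed by the PKCS#7 blob, then a struct module_signature trailer, then the MODULE_SIG_STRING marker at the very end. Below is a small user-space sketch, not part of the kernel, that locates that trailer in a .ko file; the struct is copied here purely for illustration and mirrors the kernel's include/linux/module_signature.h layout.

#include <arpa/inet.h>	/* ntohl() for the big-endian sig_len field */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SIG_MARKER "~Module signature appended~\n"

/* Mirrors the kernel's struct module_signature (12 bytes). */
struct module_signature {
	uint8_t  algo, hash, id_type, signer_len, key_id_len;
	uint8_t  pad[3];
	uint32_t sig_len;	/* big-endian on disk */
};

int main(int argc, char **argv)
{
	char tail[sizeof(SIG_MARKER) - 1];
	struct module_signature ms;
	long marker_off, ms_off;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;

	/* The marker is the very last thing in a signed .ko. */
	fseek(f, -(long)sizeof(tail), SEEK_END);
	marker_off = ftell(f);
	fread(tail, 1, sizeof(tail), f);
	if (memcmp(tail, SIG_MARKER, sizeof(tail)) != 0) {
		printf("%s: no signature marker\n", argv[1]);
		fclose(f);
		return 0;
	}

	/* struct module_signature sits immediately before the marker. */
	ms_off = marker_off - (long)sizeof(ms);
	fseek(f, ms_off, SEEK_SET);
	fread(&ms, 1, sizeof(ms), f);
	printf("%s: PKCS#7 signature, %u bytes, ending at offset %ld\n",
	       argv[1], ntohl(ms.sig_len), ms_off);
	fclose(f);
	return 0;
}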
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/file_table.c * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/init.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/filelock.h> #include <linux/security.h> #include <linux/cred.h> #include <linux/eventpoll.h> #include <linux/rcupdate.h> #include <linux/mount.h> #include <linux/capability.h> #include <linux/cdev.h> #include <linux/fsnotify.h> #include <linux/sysctl.h> #include <linux/percpu_counter.h> #include <linux/percpu.h> #include <linux/task_work.h> #include <linux/swap.h> #include <linux/kmemleak.h> #include <linux/atomic.h> #include "internal.h" /* sysctl tunables... 
*/ static struct files_stat_struct files_stat = { .max_files = NR_FILE }; /* SLAB cache for file structures */ static struct kmem_cache *filp_cachep __ro_after_init; static struct percpu_counter nr_files __cacheline_aligned_in_smp; /* Container for backing file with optional user path */ struct backing_file { struct file file; struct path user_path; }; static inline struct backing_file *backing_file(struct file *f) { return container_of(f, struct backing_file, file); } struct path *backing_file_user_path(struct file *f) { return &backing_file(f)->user_path; } EXPORT_SYMBOL_GPL(backing_file_user_path); static inline void file_free(struct file *f) { security_file_free(f); if (likely(!(f->f_mode & FMODE_NOACCOUNT))) percpu_counter_dec(&nr_files); put_cred(f->f_cred); if (unlikely(f->f_mode & FMODE_BACKING)) { path_put(backing_file_user_path(f)); kfree(backing_file(f)); } else { kmem_cache_free(filp_cachep, f); } } /* * Return the total number of open files in the system */ static long get_nr_files(void) { return percpu_counter_read_positive(&nr_files); } /* * Return the maximum number of open files in the system */ unsigned long get_max_files(void) { return files_stat.max_files; } EXPORT_SYMBOL_GPL(get_max_files); #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) /* * Handle nr_files sysctl */ static int proc_nr_files(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { files_stat.nr_files = get_nr_files(); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } static struct ctl_table fs_stat_sysctls[] = { { .procname = "file-nr", .data = &files_stat, .maxlen = sizeof(files_stat), .mode = 0444, .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, .maxlen = sizeof(files_stat.max_files), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = SYSCTL_LONG_ZERO, .extra2 = SYSCTL_LONG_MAX, }, { .procname = "nr_open", .data = &sysctl_nr_open, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, }; static int __init init_fs_stat_sysctls(void) { register_sysctl_init("fs", fs_stat_sysctls); if (IS_ENABLED(CONFIG_BINFMT_MISC)) { struct ctl_table_header *hdr; hdr = register_sysctl_mount_point("fs/binfmt_misc"); kmemleak_not_leak(hdr); } return 0; } fs_initcall(init_fs_stat_sysctls); #endif static int init_file(struct file *f, int flags, const struct cred *cred) { int error; f->f_cred = get_cred(cred); error = security_file_alloc(f); if (unlikely(error)) { put_cred(f->f_cred); return error; } rwlock_init(&f->f_owner.lock); spin_lock_init(&f->f_lock); mutex_init(&f->f_pos_lock); f->f_flags = flags; f->f_mode = OPEN_FMODE(flags); /* f->f_version: 0 */ /* * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While * fget-rcu pattern users need to be able to handle spurious * refcount bumps we should reinitialize the reused file first. */ atomic_long_set(&f->f_count, 1); return 0; } /* Find an unused file structure and return a pointer to it. * Returns an error pointer if some error happend e.g. we over file * structures limit, run out of memory or operation is not permitted. * * Be very careful using this. You are responsible for * getting write access to any mount that you might assign * to this filp, if it is opened for write. If this is not * done, you will imbalance int the mount's writer count * and a warning at __fput() time. 
*/ struct file *alloc_empty_file(int flags, const struct cred *cred) { static long old_max; struct file *f; int error; /* * Privileged users can go above max_files */ if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) { /* * percpu_counters are inaccurate. Do an expensive check before * we go and fail. */ if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files) goto over; } f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL); if (unlikely(!f)) return ERR_PTR(-ENOMEM); error = init_file(f, flags, cred); if (unlikely(error)) { kmem_cache_free(filp_cachep, f); return ERR_PTR(error); } percpu_counter_inc(&nr_files); return f; over: /* Ran out of filps - report that */ if (get_nr_files() > old_max) { pr_info("VFS: file-max limit %lu reached\n", get_max_files()); old_max = get_nr_files(); } return ERR_PTR(-ENFILE); } /* * Variant of alloc_empty_file() that doesn't check and modify nr_files. * * This is only for kernel internal use, and the allocate file must not be * installed into file tables or such. */ struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred) { struct file *f; int error; f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL); if (unlikely(!f)) return ERR_PTR(-ENOMEM); error = init_file(f, flags, cred); if (unlikely(error)) { kmem_cache_free(filp_cachep, f); return ERR_PTR(error); } f->f_mode |= FMODE_NOACCOUNT; return f; } /* * Variant of alloc_empty_file() that allocates a backing_file container * and doesn't check and modify nr_files. * * This is only for kernel internal use, and the allocate file must not be * installed into file tables or such. */ struct file *alloc_empty_backing_file(int flags, const struct cred *cred) { struct backing_file *ff; int error; ff = kzalloc(sizeof(struct backing_file), GFP_KERNEL); if (unlikely(!ff)) return ERR_PTR(-ENOMEM); error = init_file(&ff->file, flags, cred); if (unlikely(error)) { kfree(ff); return ERR_PTR(error); } ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT; return &ff->file; } /** * file_init_path - initialize a 'struct file' based on path * * @file: the file to set up * @path: the (dentry, vfsmount) pair for the new file * @fop: the 'struct file_operations' for the new file */ static void file_init_path(struct file *file, const struct path *path, const struct file_operations *fop) { file->f_path = *path; file->f_inode = path->dentry->d_inode; file->f_mapping = path->dentry->d_inode->i_mapping; file->f_wb_err = filemap_sample_wb_err(file->f_mapping); file->f_sb_err = file_sample_sb_err(file); if (fop->llseek) file->f_mode |= FMODE_LSEEK; if ((file->f_mode & FMODE_READ) && likely(fop->read || fop->read_iter)) file->f_mode |= FMODE_CAN_READ; if ((file->f_mode & FMODE_WRITE) && likely(fop->write || fop->write_iter)) file->f_mode |= FMODE_CAN_WRITE; file->f_iocb_flags = iocb_flags(file); file->f_mode |= FMODE_OPENED; file->f_op = fop; if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_inc(path->dentry->d_inode); } /** * alloc_file - allocate and initialize a 'struct file' * * @path: the (dentry, vfsmount) pair for the new file * @flags: O_... 
flags with which the new file will be opened * @fop: the 'struct file_operations' for the new file */ static struct file *alloc_file(const struct path *path, int flags, const struct file_operations *fop) { struct file *file; file = alloc_empty_file(flags, current_cred()); if (!IS_ERR(file)) file_init_path(file, path, fop); return file; } static inline int alloc_path_pseudo(const char *name, struct inode *inode, struct vfsmount *mnt, struct path *path) { struct qstr this = QSTR_INIT(name, strlen(name)); path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this); if (!path->dentry) return -ENOMEM; path->mnt = mntget(mnt); d_instantiate(path->dentry, inode); return 0; } struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt, const char *name, int flags, const struct file_operations *fops) { int ret; struct path path; struct file *file; ret = alloc_path_pseudo(name, inode, mnt, &path); if (ret) return ERR_PTR(ret); file = alloc_file(&path, flags, fops); if (IS_ERR(file)) { ihold(inode); path_put(&path); } return file; } EXPORT_SYMBOL(alloc_file_pseudo); struct file *alloc_file_pseudo_noaccount(struct inode *inode, struct vfsmount *mnt, const char *name, int flags, const struct file_operations *fops) { int ret; struct path path; struct file *file; ret = alloc_path_pseudo(name, inode, mnt, &path); if (ret) return ERR_PTR(ret); file = alloc_empty_file_noaccount(flags, current_cred()); if (IS_ERR(file)) { ihold(inode); path_put(&path); return file; } file_init_path(file, &path, fops); return file; } EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount); struct file *alloc_file_clone(struct file *base, int flags, const struct file_operations *fops) { struct file *f = alloc_file(&base->f_path, flags, fops); if (!IS_ERR(f)) { path_get(&f->f_path); f->f_mapping = base->f_mapping; } return f; } /* the real guts of fput() - releasing the last reference to file */ static void __fput(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct vfsmount *mnt = file->f_path.mnt; struct inode *inode = file->f_inode; fmode_t mode = file->f_mode; if (unlikely(!(file->f_mode & FMODE_OPENED))) goto out; might_sleep(); fsnotify_close(file); /* * The function eventpoll_release() should be the first called * in the file cleanup chain. */ eventpoll_release(file); locks_remove_file(file); security_file_release(file); if (unlikely(file->f_flags & FASYNC)) { if (file->f_op->fasync) file->f_op->fasync(-1, file, 0); } if (file->f_op->release) file->f_op->release(inode, file); if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL && !(mode & FMODE_PATH))) { cdev_put(inode->i_cdev); } fops_put(file->f_op); put_pid(file->f_owner.pid); put_file_access(file); dput(dentry); if (unlikely(mode & FMODE_NEED_UNMOUNT)) dissolve_on_fput(mnt); mntput(mnt); out: file_free(file); } static LLIST_HEAD(delayed_fput_list); static void delayed_fput(struct work_struct *unused) { struct llist_node *node = llist_del_all(&delayed_fput_list); struct file *f, *t; llist_for_each_entry_safe(f, t, node, f_llist) __fput(f); } static void ____fput(struct callback_head *work) { __fput(container_of(work, struct file, f_task_work)); } /* * If kernel thread really needs to have the final fput() it has done * to complete, call this. The only user right now is the boot - we * *do* need to make sure our writes to binaries on initramfs has * not left us with opened struct file waiting for __fput() - execve() * won't work without that. 
Please, don't add more callers without * very good reasons; in particular, never call that with locks * held and never call that from a thread that might need to do * some work on any kind of umount. */ void flush_delayed_fput(void) { delayed_fput(NULL); } EXPORT_SYMBOL_GPL(flush_delayed_fput); static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); void fput(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { struct task_struct *task = current; if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) { file_free(file); return; } if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { init_task_work(&file->f_task_work, ____fput); if (!task_work_add(task, &file->f_task_work, TWA_RESUME)) return; /* * After this task has run exit_task_work(), * task_work_add() will fail. Fall through to delayed * fput to avoid leaking *file. */ } if (llist_add(&file->f_llist, &delayed_fput_list)) schedule_delayed_work(&delayed_fput_work, 1); } } /* * synchronous analog of fput(); for kernel threads that might be needed * in some umount() (and thus can't use flush_delayed_fput() without * risking deadlocks), need to wait for completion of __fput() and know * for this specific struct file it won't involve anything that would * need them. Use only if you really need it - at the very least, * don't blindly convert fput() by kernel thread to that. */ void __fput_sync(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) __fput(file); } EXPORT_SYMBOL(fput); EXPORT_SYMBOL(__fput_sync); void __init files_init(void) { filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); percpu_counter_init(&nr_files, 0, GFP_KERNEL); } /* * One file with associated inode and dcache is very roughly 1K. Per default * do not use more than 10% of our memory for files. */ void __init files_maxfiles_init(void) { unsigned long n; unsigned long nr_pages = totalram_pages(); unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2; memreserve = min(memreserve, nr_pages - 1); n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = max_t(unsigned long, n, NR_FILE); }
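The fs_stat_sysctls table registered earlier in this file is exported through procfs, so the nr_files counter and max_files limit computed above can be observed from user space. As a quick hedged illustration (not part of the kernel source), a reader of /proc/sys/fs/file-nr might look like this; the middle column has been 0 on modern kernels since freed file structures are released immediately.

#include <stdio.h>

int main(void)
{
	unsigned long nr_in_use, nr_free, max_files;
	FILE *f = fopen("/proc/sys/fs/file-nr", "r");

	if (!f)
		return 1;
	/* Three columns: allocated file structures, unused-but-allocated
	 * (0 on current kernels), and files_stat.max_files as initialized by
	 * files_maxfiles_init() or overridden via the fs.file-max sysctl. */
	if (fscanf(f, "%lu %lu %lu", &nr_in_use, &nr_free, &max_files) == 3)
		printf("%lu of %lu file structures in use\n",
		       nr_in_use, max_files);
	fclose(f);
	return 0;
}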
4 4 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2008 Christoph Hellwig. * Portions Copyright (C) 2000-2008 Silicon Graphics, Inc. */ #include "xfs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_da_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_da_btree.h" #include "xfs_attr.h" #include "xfs_acl.h" #include "xfs_log.h" #include "xfs_xattr.h" #include "xfs_quota.h" #include <linux/posix_acl_xattr.h> /* * Get permission to use log-assisted atomic exchange of file extents. * Callers must not be running any transactions or hold any ILOCKs. */ static inline int xfs_attr_grab_log_assist( struct xfs_mount *mp) { int error = 0; /* xattr update log intent items are already enabled */ if (xfs_is_using_logged_xattrs(mp)) return 0; /* * Check if the filesystem featureset is new enough to set this log * incompat feature bit. Strictly speaking, the minimum requirement is * a V5 filesystem for the superblock field, but we'll require rmap * or reflink to avoid having to deal with really old kernels. */ if (!xfs_has_reflink(mp) && !xfs_has_rmapbt(mp)) return -EOPNOTSUPP; /* Enable log-assisted xattrs. */ error = xfs_add_incompat_log_feature(mp, XFS_SB_FEAT_INCOMPAT_LOG_XATTRS); if (error) return error; xfs_set_using_logged_xattrs(mp); xfs_warn_mount(mp, XFS_OPSTATE_WARNED_LARP, "EXPERIMENTAL logged extended attributes feature in use. Use at your own risk!"); return 0; } static inline bool xfs_attr_want_log_assist( struct xfs_mount *mp) { #ifdef DEBUG /* Logged xattrs require a V5 super for log_incompat */ return xfs_has_crc(mp) && xfs_globals.larp; #else return false; #endif } /* * Set or remove an xattr, having grabbed the appropriate logging resources * prior to calling libxfs. Callers of this function are only required to * initialize the inode, attr_filter, name, namelen, value, and valuelen fields * of @args. 
*/ int xfs_attr_change( struct xfs_da_args *args, enum xfs_attr_update op) { struct xfs_mount *mp = args->dp->i_mount; int error; if (xfs_is_shutdown(mp)) return -EIO; error = xfs_qm_dqattach(args->dp); if (error) return error; /* * We have no control over the attribute names that userspace passes us * to remove, so we have to allow the name lookup prior to attribute * removal to fail as well. */ args->op_flags = XFS_DA_OP_OKNOENT; if (xfs_attr_want_log_assist(mp)) { error = xfs_attr_grab_log_assist(mp); if (error) return error; args->op_flags |= XFS_DA_OP_LOGGED; } args->owner = args->dp->i_ino; args->geo = mp->m_attr_geo; args->whichfork = XFS_ATTR_FORK; xfs_attr_sethash(args); /* * Some xattrs must be resistant to allocation failure at ENOSPC, e.g. * creating an inode with ACLs or security attributes requires the * allocation of the xattr holding that information to succeed. Hence * we allow xattrs in the VFS TRUSTED, SYSTEM, POSIX_ACL and SECURITY * (LSM xattr) namespaces to dip into the reserve block pool to allow * manipulation of these xattrs when at ENOSPC. These VFS xattr * namespaces translate to the XFS_ATTR_ROOT and XFS_ATTR_SECURE on-disk * namespaces. * * For most of these cases, these special xattrs will fit in the inode * itself and so consume no extra space or only require temporary extra * space while an overwrite is being made. Hence the use of the reserved * pool is largely to avoid the worst case reservation from preventing * the xattr from being created at ENOSPC. */ return xfs_attr_set(args, op, args->attr_filter & (XFS_ATTR_ROOT | XFS_ATTR_SECURE)); } static int xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *value, size_t size) { struct xfs_da_args args = { .dp = XFS_I(inode), .attr_filter = handler->flags, .name = name, .namelen = strlen(name), .value = value, .valuelen = size, }; int error; if (xfs_ifork_zapped(XFS_I(inode), XFS_ATTR_FORK)) return -EIO; error = xfs_attr_get(&args); if (error) return error; return args.valuelen; } static inline enum xfs_attr_update xfs_xattr_flags_to_op( int flags, const void *value) { if (!value) return XFS_ATTRUPDATE_REMOVE; if (flags & XATTR_CREATE) return XFS_ATTRUPDATE_CREATE; if (flags & XATTR_REPLACE) return XFS_ATTRUPDATE_REPLACE; return XFS_ATTRUPDATE_UPSERT; } static int xfs_xattr_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct xfs_da_args args = { .dp = XFS_I(inode), .attr_filter = handler->flags, .name = name, .namelen = strlen(name), .value = (void *)value, .valuelen = size, }; int error; error = xfs_attr_change(&args, xfs_xattr_flags_to_op(flags, value)); if (!error && (handler->flags & XFS_ATTR_ROOT)) xfs_forget_acl(inode, name); return error; } static const struct xattr_handler xfs_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .flags = 0, /* no flags implies user namespace */ .get = xfs_xattr_get, .set = xfs_xattr_set, }; static const struct xattr_handler xfs_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, .flags = XFS_ATTR_ROOT, .get = xfs_xattr_get, .set = xfs_xattr_set, }; static const struct xattr_handler xfs_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .flags = XFS_ATTR_SECURE, .get = xfs_xattr_get, .set = xfs_xattr_set, }; const struct xattr_handler * const xfs_xattr_handlers[] = { &xfs_xattr_user_handler, &xfs_xattr_trusted_handler, &xfs_xattr_security_handler, NULL }; static 
void __xfs_xattr_put_listent( struct xfs_attr_list_context *context, char *prefix, int prefix_len, unsigned char *name, int namelen) { char *offset; int arraytop; if (context->count < 0 || context->seen_enough) return; if (!context->buffer) goto compute_size; arraytop = context->count + prefix_len + namelen + 1; if (arraytop > context->firstu) { context->count = -1; /* insufficient space */ context->seen_enough = 1; return; } offset = context->buffer + context->count; memcpy(offset, prefix, prefix_len); offset += prefix_len; strncpy(offset, (char *)name, namelen); /* real name */ offset += namelen; *offset = '\0'; compute_size: context->count += prefix_len + namelen + 1; return; } static void xfs_xattr_put_listent( struct xfs_attr_list_context *context, int flags, unsigned char *name, int namelen, void *value, int valuelen) { char *prefix; int prefix_len; ASSERT(context->count >= 0); /* Don't expose private xattr namespaces. */ if (flags & XFS_ATTR_PRIVATE_NSP_MASK) return; if (flags & XFS_ATTR_ROOT) { #ifdef CONFIG_XFS_POSIX_ACL if (namelen == SGI_ACL_FILE_SIZE && strncmp(name, SGI_ACL_FILE, SGI_ACL_FILE_SIZE) == 0) { __xfs_xattr_put_listent( context, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN, XATTR_POSIX_ACL_ACCESS, strlen(XATTR_POSIX_ACL_ACCESS)); } else if (namelen == SGI_ACL_DEFAULT_SIZE && strncmp(name, SGI_ACL_DEFAULT, SGI_ACL_DEFAULT_SIZE) == 0) { __xfs_xattr_put_listent( context, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN, XATTR_POSIX_ACL_DEFAULT, strlen(XATTR_POSIX_ACL_DEFAULT)); } #endif /* * Only show root namespace entries if we are actually allowed to * see them. */ if (!capable(CAP_SYS_ADMIN)) return; prefix = XATTR_TRUSTED_PREFIX; prefix_len = XATTR_TRUSTED_PREFIX_LEN; } else if (flags & XFS_ATTR_SECURE) { prefix = XATTR_SECURITY_PREFIX; prefix_len = XATTR_SECURITY_PREFIX_LEN; } else { prefix = XATTR_USER_PREFIX; prefix_len = XATTR_USER_PREFIX_LEN; } __xfs_xattr_put_listent(context, prefix, prefix_len, name, namelen); return; } ssize_t xfs_vn_listxattr( struct dentry *dentry, char *data, size_t size) { struct xfs_attr_list_context context; struct inode *inode = d_inode(dentry); int error; if (xfs_ifork_zapped(XFS_I(inode), XFS_ATTR_FORK)) return -EIO; /* * First read the regular on-disk attributes. */ memset(&context, 0, sizeof(context)); context.dp = XFS_I(inode); context.resynch = 1; context.buffer = size ? data : NULL; context.bufsize = size; context.firstu = context.bufsize; context.put_listent = xfs_xattr_put_listent; error = xfs_attr_list(&context); if (error) return error; if (context.count < 0) return -ERANGE; return context.count; }
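The handler table above maps the user., trusted. and security. name prefixes onto xfs_xattr_get()/xfs_xattr_set(), and xfs_vn_listxattr() feeds the generic listxattr path. A minimal hedged user-space example of exercising those paths follows; "testfile" is a placeholder and is assumed to live on an XFS mount.

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "testfile";	/* assumed to exist on XFS */
	char value[64], list[256];
	ssize_t len;

	/* Reaches xfs_xattr_set() through the user-namespace handler. */
	if (setxattr(path, "user.comment", "hello", 5, 0) != 0) {
		perror("setxattr");
		return 1;
	}

	/* Retrieved via xfs_xattr_get(); the value length is returned. */
	len = getxattr(path, "user.comment", value, sizeof(value));
	if (len >= 0)
		printf("user.comment = %.*s\n", (int)len, value);

	/* Enumerated via xfs_vn_listxattr(); names are NUL-separated. */
	len = listxattr(path, list, sizeof(list));
	for (char *p = list; len > 0 && p < list + len; p += strlen(p) + 1)
		printf("xattr: %s\n", p);

	return 0;
}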
11 50 50 146 148 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 // SPDX-License-Identifier: GPL-2.0-only /* * Support KVM gust page tracking * * This feature allows us to track page access in guest. Currently, only * write access is tracked. * * Copyright(C) 2015 Intel Corporation. * * Author: * Xiao Guangrong <guangrong.xiao@linux.intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/lockdep.h> #include <linux/kvm_host.h> #include <linux/rculist.h> #include "mmu.h" #include "mmu_internal.h" #include "page_track.h" static bool kvm_external_write_tracking_enabled(struct kvm *kvm) { #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING /* * Read external_write_tracking_enabled before related pointers. Pairs * with the smp_store_release in kvm_page_track_write_tracking_enable(). */ return smp_load_acquire(&kvm->arch.external_write_tracking_enabled); #else return false; #endif } bool kvm_page_track_write_tracking_enabled(struct kvm *kvm) { return kvm_external_write_tracking_enabled(kvm) || kvm_shadow_root_allocated(kvm) || !tdp_enabled; } void kvm_page_track_free_memslot(struct kvm_memory_slot *slot) { vfree(slot->arch.gfn_write_track); slot->arch.gfn_write_track = NULL; } static int __kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot, unsigned long npages) { const size_t size = sizeof(*slot->arch.gfn_write_track); if (!slot->arch.gfn_write_track) slot->arch.gfn_write_track = __vcalloc(npages, size, GFP_KERNEL_ACCOUNT); return slot->arch.gfn_write_track ? 
0 : -ENOMEM; } int kvm_page_track_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { if (!kvm_page_track_write_tracking_enabled(kvm)) return 0; return __kvm_page_track_write_tracking_alloc(slot, npages); } int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot) { return __kvm_page_track_write_tracking_alloc(slot, slot->npages); } static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, short count) { int index, val; index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); val = slot->arch.gfn_write_track[index]; if (WARN_ON_ONCE(val + count < 0 || val + count > USHRT_MAX)) return; slot->arch.gfn_write_track[index] += count; } void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) { lockdep_assert_held_write(&kvm->mmu_lock); lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || srcu_read_lock_held(&kvm->srcu)); if (KVM_BUG_ON(!kvm_page_track_write_tracking_enabled(kvm), kvm)) return; update_gfn_write_track(slot, gfn, 1); /* * new track stops large page mapping for the * tracked page. */ kvm_mmu_gfn_disallow_lpage(slot, gfn); if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) kvm_flush_remote_tlbs(kvm); } void __kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) { lockdep_assert_held_write(&kvm->mmu_lock); lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || srcu_read_lock_held(&kvm->srcu)); if (KVM_BUG_ON(!kvm_page_track_write_tracking_enabled(kvm), kvm)) return; update_gfn_write_track(slot, gfn, -1); /* * allow large page mapping for the tracked page * after the tracker is gone. */ kvm_mmu_gfn_allow_lpage(slot, gfn); } /* * check if the corresponding access on the specified guest page is tracked. */ bool kvm_gfn_is_write_tracked(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn) { int index; if (!slot) return false; if (!kvm_page_track_write_tracking_enabled(kvm)) return false; index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); return !!READ_ONCE(slot->arch.gfn_write_track[index]); } #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING void kvm_page_track_cleanup(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; head = &kvm->arch.track_notifier_head; cleanup_srcu_struct(&head->track_srcu); } int kvm_page_track_init(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; head = &kvm->arch.track_notifier_head; INIT_HLIST_HEAD(&head->track_notifier_list); return init_srcu_struct(&head->track_srcu); } static int kvm_enable_external_write_tracking(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; int r = 0, i, bkt; mutex_lock(&kvm->slots_arch_lock); /* * Check for *any* write tracking user (not just external users) under * lock. This avoids unnecessary work, e.g. if KVM itself is using * write tracking, or if two external users raced when registering. */ if (kvm_page_track_write_tracking_enabled(kvm)) goto out_success; for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { slots = __kvm_memslots(kvm, i); kvm_for_each_memslot(slot, bkt, slots) { /* * Intentionally do NOT free allocations on failure to * avoid having to track which allocations were made * now versus when the memslot was created. The * metadata is guaranteed to be freed when the slot is * freed, and will be kept/used if userspace retries * the failed ioctl() instead of killing the VM. 
*/ r = kvm_page_track_write_tracking_alloc(slot); if (r) goto out_unlock; } } out_success: /* * Ensure that external_write_tracking_enabled becomes true strictly * after all the related pointers are set. */ smp_store_release(&kvm->arch.external_write_tracking_enabled, true); out_unlock: mutex_unlock(&kvm->slots_arch_lock); return r; } /* * register the notifier so that event interception for the tracked guest * pages can be received. */ int kvm_page_track_register_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n) { struct kvm_page_track_notifier_head *head; int r; if (!kvm || kvm->mm != current->mm) return -ESRCH; if (!kvm_external_write_tracking_enabled(kvm)) { r = kvm_enable_external_write_tracking(kvm); if (r) return r; } kvm_get_kvm(kvm); head = &kvm->arch.track_notifier_head; write_lock(&kvm->mmu_lock); hlist_add_head_rcu(&n->node, &head->track_notifier_list); write_unlock(&kvm->mmu_lock); return 0; } EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); /* * stop receiving the event interception. It is the opposed operation of * kvm_page_track_register_notifier(). */ void kvm_page_track_unregister_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n) { struct kvm_page_track_notifier_head *head; head = &kvm->arch.track_notifier_head; write_lock(&kvm->mmu_lock); hlist_del_rcu(&n->node); write_unlock(&kvm->mmu_lock); synchronize_srcu(&head->track_srcu); kvm_put_kvm(kvm); } EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); /* * Notify the node that write access is intercepted and write emulation is * finished at this time. * * The node should figure out if the written page is the one that node is * interested in by itself. */ void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes) { struct kvm_page_track_notifier_head *head; struct kvm_page_track_notifier_node *n; int idx; head = &kvm->arch.track_notifier_head; if (hlist_empty(&head->track_notifier_list)) return; idx = srcu_read_lock(&head->track_srcu); hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, srcu_read_lock_held(&head->track_srcu)) if (n->track_write) n->track_write(gpa, new, bytes, n); srcu_read_unlock(&head->track_srcu, idx); } /* * Notify external page track nodes that a memory region is being removed from * the VM, e.g. so that users can free any associated metadata. */ void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot) { struct kvm_page_track_notifier_head *head; struct kvm_page_track_notifier_node *n; int idx; head = &kvm->arch.track_notifier_head; if (hlist_empty(&head->track_notifier_list)) return; idx = srcu_read_lock(&head->track_srcu); hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, srcu_read_lock_held(&head->track_srcu)) if (n->track_remove_region) n->track_remove_region(slot->base_gfn, slot->npages, n); srcu_read_unlock(&head->track_srcu, idx); } /* * add guest page to the tracking pool so that corresponding access on that * page will be intercepted. * * @kvm: the guest instance we are interested in. * @gfn: the guest page. 
*/ int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; int idx; idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { srcu_read_unlock(&kvm->srcu, idx); return -EINVAL; } write_lock(&kvm->mmu_lock); __kvm_write_track_add_gfn(kvm, slot, gfn); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return 0; } EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn); /* * remove the guest page from the tracking pool which stops the interception * of corresponding access on that page. * * @kvm: the guest instance we are interested in. * @gfn: the guest page. */ int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; int idx; idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { srcu_read_unlock(&kvm->srcu, idx); return -EINVAL; } write_lock(&kvm->mmu_lock); __kvm_write_track_remove_gfn(kvm, slot, gfn); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return 0; } EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn); #endif
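For reference, a hedged sketch of how an external user of this API (KVMGT is the in-tree example) registers a notifier and then asks for write tracking of specific guest pages. The kvm pointer and the gfn are assumptions here; in a real user they come from the caller's own VM/device association, and the demo_* names are illustrative only.

#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

/* Invoked after a write to a tracked gpa has been intercepted and emulated. */
static void demo_track_write(gpa_t gpa, const u8 *new, int bytes,
			     struct kvm_page_track_notifier_node *node)
{
	pr_debug("tracked write of %d bytes at gpa 0x%llx\n", bytes, gpa);
}

static struct kvm_page_track_notifier_node demo_node = {
	.track_write = demo_track_write,
};

static int demo_track_gfn(struct kvm *kvm, gfn_t gfn)
{
	int r;

	/* Enables external write tracking on first use and takes a
	 * reference on the VM. */
	r = kvm_page_track_register_notifier(kvm, &demo_node);
	if (r)
		return r;

	/* Write-protects the page and disallows large mappings for it. */
	return kvm_write_track_add_gfn(kvm, gfn);
}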
3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 // SPDX-License-Identifier: GPL-2.0-or-later /* Virtio ring implementation. * * Copyright 2007 Rusty Russell IBM Corporation */ #include <linux/virtio.h> #include <linux/virtio_ring.h> #include <linux/virtio_config.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/hrtimer.h> #include <linux/dma-mapping.h> #include <linux/kmsan.h> #include <linux/spinlock.h> #include <xen/xen.h> #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&(_vq)->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ BUG(); \ } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ panic("%s:in_use = %i\n", \ (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) #define LAST_ADD_TIME_UPDATE(_vq) \ do { \ ktime_t now = ktime_get(); \ \ /* No kick or get, with .1 second between? Warn. */ \ if ((_vq)->last_add_time_valid) \ WARN_ON(ktime_to_ms(ktime_sub(now, \ (_vq)->last_add_time)) > 100); \ (_vq)->last_add_time = now; \ (_vq)->last_add_time_valid = true; \ } while (0) #define LAST_ADD_TIME_CHECK(_vq) \ do { \ if ((_vq)->last_add_time_valid) { \ WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \ (_vq)->last_add_time)) > 100); \ } \ } while (0) #define LAST_ADD_TIME_INVALID(_vq) \ ((_vq)->last_add_time_valid = false) #else #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&_vq->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ (_vq)->broken = true; \ } while (0) #define START_USE(vq) #define END_USE(vq) #define LAST_ADD_TIME_UPDATE(vq) #define LAST_ADD_TIME_CHECK(vq) #define LAST_ADD_TIME_INVALID(vq) #endif struct vring_desc_state_split { void *data; /* Data for callback. */ struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ }; struct vring_desc_state_packed { void *data; /* Data for callback. */ struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */ u16 num; /* Descriptor list length. */ u16 last; /* The last desc state in a list. */ }; struct vring_desc_extra { dma_addr_t addr; /* Descriptor DMA addr. */ u32 len; /* Descriptor length. */ u16 flags; /* Descriptor flags. */ u16 next; /* The next desc state in a list. */ }; struct vring_virtqueue_split { /* Actual memory layout for this queue. 
*/ struct vring vring; /* Last written value to avail->flags */ u16 avail_flags_shadow; /* * Last written value to avail->idx in * guest byte order. */ u16 avail_idx_shadow; /* Per-descriptor state. */ struct vring_desc_state_split *desc_state; struct vring_desc_extra *desc_extra; /* DMA address and size information */ dma_addr_t queue_dma_addr; size_t queue_size_in_bytes; /* * The parameters for creating vrings are reserved for creating new * vring. */ u32 vring_align; bool may_reduce_num; }; struct vring_virtqueue_packed { /* Actual memory layout for this queue. */ struct { unsigned int num; struct vring_packed_desc *desc; struct vring_packed_desc_event *driver; struct vring_packed_desc_event *device; } vring; /* Driver ring wrap counter. */ bool avail_wrap_counter; /* Avail used flags. */ u16 avail_used_flags; /* Index of the next avail descriptor. */ u16 next_avail_idx; /* * Last written value to driver->flags in * guest byte order. */ u16 event_flags_shadow; /* Per-descriptor state. */ struct vring_desc_state_packed *desc_state; struct vring_desc_extra *desc_extra; /* DMA address and size information */ dma_addr_t ring_dma_addr; dma_addr_t driver_event_dma_addr; dma_addr_t device_event_dma_addr; size_t ring_size_in_bytes; size_t event_size_in_bytes; }; struct vring_virtqueue { struct virtqueue vq; /* Is this a packed ring? */ bool packed_ring; /* Is DMA API used? */ bool use_dma_api; /* Can we use weak barriers? */ bool weak_barriers; /* Other side has made a mess, don't try any more. */ bool broken; /* Host supports indirect buffers */ bool indirect; /* Host publishes avail event idx */ bool event; /* Do DMA mapping by driver */ bool premapped; /* Do unmap or not for desc. Just when premapped is False and * use_dma_api is true, this is true. */ bool do_unmap; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ unsigned int num_added; /* Last used index we've seen. * for split ring, it just contains last used index * for packed ring: * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index. * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter. */ u16 last_used_idx; /* Hint for event idx: already triggered no need to disable. */ bool event_triggered; union { /* Available for split ring */ struct vring_virtqueue_split split; /* Available for packed ring */ struct vring_virtqueue_packed packed; }; /* How to notify other side. FIXME: commonalize hcalls! */ bool (*notify)(struct virtqueue *vq); /* DMA, allocation, and size information */ bool we_own_ring; /* Device used for doing DMA */ struct device *dma_dev; #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; /* Figure out if their kicks are too delayed. */ bool last_add_time_valid; ktime_t last_add_time; #endif }; static struct virtqueue *__vring_new_virtqueue(unsigned int index, struct vring_virtqueue_split *vring_split, struct virtio_device *vdev, bool weak_barriers, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev); static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num); static void vring_free(struct virtqueue *_vq); /* * Helpers. */ #define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq) static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, unsigned int total_sg) { /* * If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. 
 * FIXME: tune this threshold
 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */
static bool vring_use_dma_api(const struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time. On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge. Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag,
			       struct device *dma_dev)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(dma_dev, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address. The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine. Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle,
			     struct device *dma_dev)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(dma_dev, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->dma_dev;
}

/* Map one sg entry.
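 *
 * Three cases are handled below: premapped rings take the DMA address the
 * driver already produced, via sg_dma_address(); rings that bypass the DMA
 * API use the physical address of the page directly; everything else is
 * mapped with dma_map_page() against the vring's DMA device.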
*/ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg, enum dma_data_direction direction, dma_addr_t *addr) { if (vq->premapped) { *addr = sg_dma_address(sg); return 0; } if (!vq->use_dma_api) { /* * If DMA is not used, KMSAN doesn't know that the scatterlist * is initialized by the hardware. Explicitly check/unpoison it * depending on the direction. */ kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction); *addr = (dma_addr_t)sg_phys(sg); return 0; } /* * We can't use dma_map_sg, because we don't use scatterlists in * the way it expects (we don't guarantee that the scatterlist * will exist for the lifetime of the mapping). */ *addr = dma_map_page(vring_dma_dev(vq), sg_page(sg), sg->offset, sg->length, direction); if (dma_mapping_error(vring_dma_dev(vq), *addr)) return -ENOMEM; return 0; } static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, void *cpu_addr, size_t size, enum dma_data_direction direction) { if (!vq->use_dma_api) return (dma_addr_t)virt_to_phys(cpu_addr); return dma_map_single(vring_dma_dev(vq), cpu_addr, size, direction); } static int vring_mapping_error(const struct vring_virtqueue *vq, dma_addr_t addr) { if (!vq->use_dma_api) return 0; return dma_mapping_error(vring_dma_dev(vq), addr); } static void virtqueue_init(struct vring_virtqueue *vq, u32 num) { vq->vq.num_free = num; if (vq->packed_ring) vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); else vq->last_used_idx = 0; vq->event_triggered = false; vq->num_added = 0; #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; #endif } /* * Split ring specific functions - *_split(). */ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq, const struct vring_desc *desc) { u16 flags; if (!vq->do_unmap) return; flags = virtio16_to_cpu(vq->vq.vdev, desc->flags); dma_unmap_page(vring_dma_dev(vq), virtio64_to_cpu(vq->vq.vdev, desc->addr), virtio32_to_cpu(vq->vq.vdev, desc->len), (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, unsigned int i) { struct vring_desc_extra *extra = vq->split.desc_extra; u16 flags; flags = extra[i].flags; if (flags & VRING_DESC_F_INDIRECT) { if (!vq->use_dma_api) goto out; dma_unmap_single(vring_dma_dev(vq), extra[i].addr, extra[i].len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { if (!vq->do_unmap) goto out; dma_unmap_page(vring_dma_dev(vq), extra[i].addr, extra[i].len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } out: return extra[i].next; } static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, unsigned int total_sg, gfp_t gfp) { struct vring_desc *desc; unsigned int i; /* * We require lowmem mappings for the descriptors because * otherwise virt_to_phys will give us bogus addresses in the * virtqueue. 
*/ gfp &= ~__GFP_HIGHMEM; desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp); if (!desc) return NULL; for (i = 0; i < total_sg; i++) desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); return desc; } static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, struct vring_desc *desc, unsigned int i, dma_addr_t addr, unsigned int len, u16 flags, bool indirect) { struct vring_virtqueue *vring = to_vvq(vq); struct vring_desc_extra *extra = vring->split.desc_extra; u16 next; desc[i].flags = cpu_to_virtio16(vq->vdev, flags); desc[i].addr = cpu_to_virtio64(vq->vdev, addr); desc[i].len = cpu_to_virtio32(vq->vdev, len); if (!indirect) { next = extra[i].next; desc[i].next = cpu_to_virtio16(vq->vdev, next); extra[i].addr = addr; extra[i].len = len; extra[i].flags = flags; } else next = virtio16_to_cpu(vq->vdev, desc[i].next); return next; } static inline int virtqueue_add_split(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, void *ctx, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; struct vring_desc *desc; unsigned int i, n, avail, descs_used, prev, err_idx; int head; bool indirect; START_USE(vq); BUG_ON(data == NULL); BUG_ON(ctx && vq->indirect); if (unlikely(vq->broken)) { END_USE(vq); return -EIO; } LAST_ADD_TIME_UPDATE(vq); BUG_ON(total_sg == 0); head = vq->free_head; if (virtqueue_use_indirect(vq, total_sg)) desc = alloc_indirect_split(_vq, total_sg, gfp); else { desc = NULL; WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); } if (desc) { /* Use a single buffer which doesn't continue */ indirect = true; /* Set up rest to use this indirect table. */ i = 0; descs_used = 1; } else { indirect = false; desc = vq->split.vring.desc; i = head; descs_used = total_sg; } if (unlikely(vq->vq.num_free < descs_used)) { pr_debug("Can't add buf len %i - avail = %i\n", descs_used, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ if (out_sgs) vq->notify(&vq->vq); if (indirect) kfree(desc); END_USE(vq); return -ENOSPC; } for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr; if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr)) goto unmap_release; prev = i; /* Note that we trust indirect descriptor * table since it use stream DMA mapping. */ i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length, VRING_DESC_F_NEXT, indirect); } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr; if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr)) goto unmap_release; prev = i; /* Note that we trust indirect descriptor * table since it use stream DMA mapping. */ i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, indirect); } } /* Last one doesn't continue. */ desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); if (!indirect && vq->do_unmap) vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= ~VRING_DESC_F_NEXT; if (indirect) { /* Now that the indirect table is filled in, map it. 
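 * The table itself gets a single streaming DMA_TO_DEVICE mapping; the head
 * descriptor already claimed in the ring is then pointed at it with
 * VRING_DESC_F_INDIRECT, so the whole request occupies one ring slot.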
*/ dma_addr_t addr = vring_map_single( vq, desc, total_sg * sizeof(struct vring_desc), DMA_TO_DEVICE); if (vring_mapping_error(vq, addr)) { if (vq->premapped) goto free_indirect; goto unmap_release; } virtqueue_add_desc_split(_vq, vq->split.vring.desc, head, addr, total_sg * sizeof(struct vring_desc), VRING_DESC_F_INDIRECT, false); } /* We're using some buffers from the free list. */ vq->vq.num_free -= descs_used; /* Update free pointer */ if (indirect) vq->free_head = vq->split.desc_extra[head].next; else vq->free_head = i; /* Store token and indirect buffer state. */ vq->split.desc_state[head].data = data; if (indirect) vq->split.desc_state[head].indir_desc = desc; else vq->split.desc_state[head].indir_desc = ctx; /* Put entry in available array (but don't update avail->idx until they * do sync). */ avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); vq->split.avail_idx_shadow++; vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->split.avail_idx_shadow); vq->num_added++; pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); return 0; unmap_release: err_idx = i; if (indirect) i = 0; else i = head; for (n = 0; n < total_sg; n++) { if (i == err_idx) break; if (indirect) { vring_unmap_one_split_indirect(vq, &desc[i]); i = virtio16_to_cpu(_vq->vdev, desc[i].next); } else i = vring_unmap_one_split(vq, i); } free_indirect: if (indirect) kfree(desc); END_USE(vq); return -ENOMEM; } static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; bool needs_kick; START_USE(vq); /* We need to expose available array entries before checking avail * event. */ virtio_mb(vq->weak_barriers); old = vq->split.avail_idx_shadow - vq->num_added; new = vq->split.avail_idx_shadow; vq->num_added = 0; LAST_ADD_TIME_CHECK(vq); LAST_ADD_TIME_INVALID(vq); if (vq->event) { needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->split.vring)), new, old); } else { needs_kick = !(vq->split.vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); } END_USE(vq); return needs_kick; } static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, void **ctx) { unsigned int i, j; __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); /* Clear data ptr. */ vq->split.desc_state[head].data = NULL; /* Put back on free list: unmap first-level descriptors and find end */ i = head; while (vq->split.vring.desc[i].flags & nextflag) { vring_unmap_one_split(vq, i); i = vq->split.desc_extra[i].next; vq->vq.num_free++; } vring_unmap_one_split(vq, i); vq->split.desc_extra[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; if (vq->indirect) { struct vring_desc *indir_desc = vq->split.desc_state[head].indir_desc; u32 len; /* Free the indirect table, if any, now that it's unmapped. 
*/ if (!indir_desc) return; len = vq->split.desc_extra[head].len; BUG_ON(!(vq->split.desc_extra[head].flags & VRING_DESC_F_INDIRECT)); BUG_ON(len == 0 || len % sizeof(struct vring_desc)); if (vq->do_unmap) { for (j = 0; j < len / sizeof(struct vring_desc); j++) vring_unmap_one_split_indirect(vq, &indir_desc[j]); } kfree(indir_desc); vq->split.desc_state[head].indir_desc = NULL; } else if (ctx) { *ctx = vq->split.desc_state[head].indir_desc; } } static bool more_used_split(const struct vring_virtqueue *vq) { return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->split.vring.used->idx); } static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, unsigned int *len, void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; u16 last_used; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used_split(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used array entries after they have been exposed by host. */ virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); i = virtio32_to_cpu(_vq->vdev, vq->split.vring.used->ring[last_used].id); *len = virtio32_to_cpu(_vq->vdev, vq->split.vring.used->ring[last_used].len); if (unlikely(i >= vq->split.vring.num)) { BAD_RING(vq, "id %u out of range\n", i); return NULL; } if (unlikely(!vq->split.desc_state[i].data)) { BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } /* detach_buf_split clears data, so grab it now. */ ret = vq->split.desc_state[i].data; detach_buf_split(vq, i, ctx); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) virtio_store_mb(vq->weak_barriers, &vring_used_event(&vq->split.vring), cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); LAST_ADD_TIME_INVALID(vq); END_USE(vq); return ret; } static void virtqueue_disable_cb_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; /* * If device triggered an event already it won't trigger one again: * no need to disable. */ if (vq->event_triggered) return; if (vq->event) /* TODO: this is a hack. Figure out a cleaner value to write. */ vring_used_event(&vq->split.vring) = 0x0; else vq->split.vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->split.avail_flags_shadow); } } static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used_idx; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. 
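 *
 * The opaque value returned here is what the caller later hands back to
 * virtqueue_poll(); a rough sketch of the usual calling pattern
 * (illustrative only, not taken from any particular driver) is:
 *
 *	last = virtqueue_enable_cb_prepare(vq);
 *	if (unlikely(virtqueue_poll(vq, last))) {
 *		virtqueue_disable_cb(vq);
 *		// more buffers arrived; keep processing before re-enabling
 *	}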
*/ if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vq->split.vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->split.avail_flags_shadow); } vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); END_USE(vq); return last_used_idx; } static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx); } static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 bufs; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always update the event index to keep code simple. */ if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vq->split.vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->split.avail_flags_shadow); } /* TODO: tune this threshold */ bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; virtio_store_mb(vq->weak_barriers, &vring_used_event(&vq->split.vring), cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } END_USE(vq); return true; } static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->split.vring.num; i++) { if (!vq->split.desc_state[i].data) continue; /* detach_buf_split clears data, so grab it now. */ buf = vq->split.desc_state[i].data; detach_buf_split(vq, i, NULL); vq->split.avail_idx_shadow--; vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->split.avail_idx_shadow); END_USE(vq); return buf; } /* That should have freed everything. */ BUG_ON(vq->vq.num_free != vq->split.vring.num); END_USE(vq); return NULL; } static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split, struct vring_virtqueue *vq) { struct virtio_device *vdev; vdev = vq->vq.vdev; vring_split->avail_flags_shadow = 0; vring_split->avail_idx_shadow = 0; /* No callback? Tell other side not to bother us. */ if (!vq->vq.callback) { vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; if (!vq->event) vring_split->vring.avail->flags = cpu_to_virtio16(vdev, vring_split->avail_flags_shadow); } } static void virtqueue_reinit_split(struct vring_virtqueue *vq) { int num; num = vq->split.vring.num; vq->split.vring.avail->flags = 0; vq->split.vring.avail->idx = 0; /* reset avail event */ vq->split.vring.avail->ring[num] = 0; vq->split.vring.used->flags = 0; vq->split.vring.used->idx = 0; /* reset used event */ *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0; virtqueue_init(vq, num); virtqueue_vring_init_split(&vq->split, vq); } static void virtqueue_vring_attach_split(struct vring_virtqueue *vq, struct vring_virtqueue_split *vring_split) { vq->split = *vring_split; /* Put everything in free lists. 
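 * The free list is threaded through desc_extra[].next, which
 * vring_alloc_desc_extra() initialises as a simple i -> i + 1 chain, so
 * pointing free_head at entry 0 makes the entire ring available.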
*/ vq->free_head = 0; } static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split) { struct vring_desc_state_split *state; struct vring_desc_extra *extra; u32 num = vring_split->vring.num; state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL); if (!state) goto err_state; extra = vring_alloc_desc_extra(num); if (!extra) goto err_extra; memset(state, 0, num * sizeof(struct vring_desc_state_split)); vring_split->desc_state = state; vring_split->desc_extra = extra; return 0; err_extra: kfree(state); err_state: return -ENOMEM; } static void vring_free_split(struct vring_virtqueue_split *vring_split, struct virtio_device *vdev, struct device *dma_dev) { vring_free_queue(vdev, vring_split->queue_size_in_bytes, vring_split->vring.desc, vring_split->queue_dma_addr, dma_dev); kfree(vring_split->desc_state); kfree(vring_split->desc_extra); } static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split, struct virtio_device *vdev, u32 num, unsigned int vring_align, bool may_reduce_num, struct device *dma_dev) { void *queue = NULL; dma_addr_t dma_addr; /* We assume num is a power of 2. */ if (!is_power_of_2(num)) { dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); return -EINVAL; } /* TODO: allocate each queue chunk individually */ for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) { queue = vring_alloc_queue(vdev, vring_size(num, vring_align), &dma_addr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, dma_dev); if (queue) break; if (!may_reduce_num) return -ENOMEM; } if (!num) return -ENOMEM; if (!queue) { /* Try to get a single page. You are my only hope! */ queue = vring_alloc_queue(vdev, vring_size(num, vring_align), &dma_addr, GFP_KERNEL | __GFP_ZERO, dma_dev); } if (!queue) return -ENOMEM; vring_init(&vring_split->vring, num, queue, vring_align); vring_split->queue_dma_addr = dma_addr; vring_split->queue_size_in_bytes = vring_size(num, vring_align); vring_split->vring_align = vring_align; vring_split->may_reduce_num = may_reduce_num; return 0; } static struct virtqueue *vring_create_virtqueue_split( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { struct vring_virtqueue_split vring_split = {}; struct virtqueue *vq; int err; err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align, may_reduce_num, dma_dev); if (err) return NULL; vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, context, notify, callback, name, dma_dev); if (!vq) { vring_free_split(&vring_split, vdev, dma_dev); return NULL; } to_vvq(vq)->we_own_ring = true; return vq; } static int virtqueue_resize_split(struct virtqueue *_vq, u32 num) { struct vring_virtqueue_split vring_split = {}; struct vring_virtqueue *vq = to_vvq(_vq); struct virtio_device *vdev = _vq->vdev; int err; err = vring_alloc_queue_split(&vring_split, vdev, num, vq->split.vring_align, vq->split.may_reduce_num, vring_dma_dev(vq)); if (err) goto err; err = vring_alloc_state_extra_split(&vring_split); if (err) goto err_state_extra; vring_free(&vq->vq); virtqueue_vring_init_split(&vring_split, vq); virtqueue_init(vq, vring_split.vring.num); virtqueue_vring_attach_split(vq, &vring_split); return 0; err_state_extra: vring_free_split(&vring_split, vdev, vring_dma_dev(vq)); err: virtqueue_reinit_split(vq); return -ENOMEM; } /* * Packed ring specific functions 
- *_packed(). */ static bool packed_used_wrap_counter(u16 last_used_idx) { return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR)); } static u16 packed_last_used(u16 last_used_idx) { return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR)); } static void vring_unmap_extra_packed(const struct vring_virtqueue *vq, const struct vring_desc_extra *extra) { u16 flags; flags = extra->flags; if (flags & VRING_DESC_F_INDIRECT) { if (!vq->use_dma_api) return; dma_unmap_single(vring_dma_dev(vq), extra->addr, extra->len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { if (!vq->do_unmap) return; dma_unmap_page(vring_dma_dev(vq), extra->addr, extra->len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } } static void vring_unmap_desc_packed(const struct vring_virtqueue *vq, const struct vring_packed_desc *desc) { u16 flags; if (!vq->do_unmap) return; flags = le16_to_cpu(desc->flags); dma_unmap_page(vring_dma_dev(vq), le64_to_cpu(desc->addr), le32_to_cpu(desc->len), (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg, gfp_t gfp) { struct vring_packed_desc *desc; /* * We require lowmem mappings for the descriptors because * otherwise virt_to_phys will give us bogus addresses in the * virtqueue. */ gfp &= ~__GFP_HIGHMEM; desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp); return desc; } static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { struct vring_packed_desc *desc; struct scatterlist *sg; unsigned int i, n, err_idx; u16 head, id; dma_addr_t addr; head = vq->packed.next_avail_idx; desc = alloc_indirect_packed(total_sg, gfp); if (!desc) return -ENOMEM; if (unlikely(vq->vq.num_free < 1)) { pr_debug("Can't add buf len 1 - avail = 0\n"); kfree(desc); END_USE(vq); return -ENOSPC; } i = 0; id = vq->free_head; BUG_ON(id == vq->packed.vring.num); for (n = 0; n < out_sgs + in_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { if (vring_map_one_sg(vq, sg, n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr)) goto unmap_release; desc[i].flags = cpu_to_le16(n < out_sgs ? 0 : VRING_DESC_F_WRITE); desc[i].addr = cpu_to_le64(addr); desc[i].len = cpu_to_le32(sg->length); i++; } } /* Now that the indirect table is filled in, map it. */ addr = vring_map_single(vq, desc, total_sg * sizeof(struct vring_packed_desc), DMA_TO_DEVICE); if (vring_mapping_error(vq, addr)) { if (vq->premapped) goto free_desc; goto unmap_release; } vq->packed.vring.desc[head].addr = cpu_to_le64(addr); vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * sizeof(struct vring_packed_desc)); vq->packed.vring.desc[head].id = cpu_to_le16(id); if (vq->use_dma_api) { vq->packed.desc_extra[id].addr = addr; vq->packed.desc_extra[id].len = total_sg * sizeof(struct vring_packed_desc); vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | vq->packed.avail_used_flags; } /* * A driver MUST NOT make the first descriptor in the list * available before all subsequent descriptors comprising * the list are made available. */ virtio_wmb(vq->weak_barriers); vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | vq->packed.avail_used_flags); /* We're using some buffers from the free list. 
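 * Only one slot in the ring is consumed here, no matter how many
 * scatterlist elements were passed in, because the per-element descriptors
 * live in the separately allocated indirect table.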
*/ vq->vq.num_free -= 1; /* Update free pointer */ n = head + 1; if (n >= vq->packed.vring.num) { n = 0; vq->packed.avail_wrap_counter ^= 1; vq->packed.avail_used_flags ^= 1 << VRING_PACKED_DESC_F_AVAIL | 1 << VRING_PACKED_DESC_F_USED; } vq->packed.next_avail_idx = n; vq->free_head = vq->packed.desc_extra[id].next; /* Store token and indirect buffer state. */ vq->packed.desc_state[id].num = 1; vq->packed.desc_state[id].data = data; vq->packed.desc_state[id].indir_desc = desc; vq->packed.desc_state[id].last = id; vq->num_added += 1; pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return 0; unmap_release: err_idx = i; for (i = 0; i < err_idx; i++) vring_unmap_desc_packed(vq, &desc[i]); free_desc: kfree(desc); END_USE(vq); return -ENOMEM; } static inline int virtqueue_add_packed(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, void *ctx, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct vring_packed_desc *desc; struct scatterlist *sg; unsigned int i, n, c, descs_used, err_idx; __le16 head_flags, flags; u16 head, id, prev, curr, avail_used_flags; int err; START_USE(vq); BUG_ON(data == NULL); BUG_ON(ctx && vq->indirect); if (unlikely(vq->broken)) { END_USE(vq); return -EIO; } LAST_ADD_TIME_UPDATE(vq); BUG_ON(total_sg == 0); if (virtqueue_use_indirect(vq, total_sg)) { err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); if (err != -ENOMEM) { END_USE(vq); return err; } /* fall back on direct */ } head = vq->packed.next_avail_idx; avail_used_flags = vq->packed.avail_used_flags; WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); desc = vq->packed.vring.desc; i = head; descs_used = total_sg; if (unlikely(vq->vq.num_free < descs_used)) { pr_debug("Can't add buf len %i - avail = %i\n", descs_used, vq->vq.num_free); END_USE(vq); return -ENOSPC; } id = vq->free_head; BUG_ON(id == vq->packed.vring.num); curr = id; c = 0; for (n = 0; n < out_sgs + in_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr; if (vring_map_one_sg(vq, sg, n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr)) goto unmap_release; flags = cpu_to_le16(vq->packed.avail_used_flags | (++c == total_sg ? 0 : VRING_DESC_F_NEXT) | (n < out_sgs ? 0 : VRING_DESC_F_WRITE)); if (i == head) head_flags = flags; else desc[i].flags = flags; desc[i].addr = cpu_to_le64(addr); desc[i].len = cpu_to_le32(sg->length); desc[i].id = cpu_to_le16(id); if (unlikely(vq->use_dma_api)) { vq->packed.desc_extra[curr].addr = addr; vq->packed.desc_extra[curr].len = sg->length; vq->packed.desc_extra[curr].flags = le16_to_cpu(flags); } prev = curr; curr = vq->packed.desc_extra[curr].next; if ((unlikely(++i >= vq->packed.vring.num))) { i = 0; vq->packed.avail_used_flags ^= 1 << VRING_PACKED_DESC_F_AVAIL | 1 << VRING_PACKED_DESC_F_USED; } } } if (i <= head) vq->packed.avail_wrap_counter ^= 1; /* We're using some buffers from the free list. */ vq->vq.num_free -= descs_used; /* Update free pointer */ vq->packed.next_avail_idx = i; vq->free_head = curr; /* Store token. */ vq->packed.desc_state[id].num = descs_used; vq->packed.desc_state[id].data = data; vq->packed.desc_state[id].indir_desc = ctx; vq->packed.desc_state[id].last = prev; /* * A driver MUST NOT make the first descriptor in the list * available before all subsequent descriptors comprising * the list are made available. 
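 * That is why head_flags is held back until after the write barrier below:
 * only once every other descriptor has been written is the head's flags
 * word stored, making the whole chain visible to the device at once.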
*/ virtio_wmb(vq->weak_barriers); vq->packed.vring.desc[head].flags = head_flags; vq->num_added += descs_used; pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return 0; unmap_release: err_idx = i; i = head; curr = vq->free_head; vq->packed.avail_used_flags = avail_used_flags; for (n = 0; n < total_sg; n++) { if (i == err_idx) break; vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); curr = vq->packed.desc_extra[curr].next; i++; if (i >= vq->packed.vring.num) i = 0; } END_USE(vq); return -EIO; } static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old, off_wrap, flags, wrap_counter, event_idx; bool needs_kick; union { struct { __le16 off_wrap; __le16 flags; }; u32 u32; } snapshot; START_USE(vq); /* * We need to expose the new flags value before checking notification * suppressions. */ virtio_mb(vq->weak_barriers); old = vq->packed.next_avail_idx - vq->num_added; new = vq->packed.next_avail_idx; vq->num_added = 0; snapshot.u32 = *(u32 *)vq->packed.vring.device; flags = le16_to_cpu(snapshot.flags); LAST_ADD_TIME_CHECK(vq); LAST_ADD_TIME_INVALID(vq); if (flags != VRING_PACKED_EVENT_FLAG_DESC) { needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE); goto out; } off_wrap = le16_to_cpu(snapshot.off_wrap); wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); if (wrap_counter != vq->packed.avail_wrap_counter) event_idx -= vq->packed.vring.num; needs_kick = vring_need_event(event_idx, new, old); out: END_USE(vq); return needs_kick; } static void detach_buf_packed(struct vring_virtqueue *vq, unsigned int id, void **ctx) { struct vring_desc_state_packed *state = NULL; struct vring_packed_desc *desc; unsigned int i, curr; state = &vq->packed.desc_state[id]; /* Clear data ptr. */ state->data = NULL; vq->packed.desc_extra[state->last].next = vq->free_head; vq->free_head = id; vq->vq.num_free += state->num; if (unlikely(vq->use_dma_api)) { curr = id; for (i = 0; i < state->num; i++) { vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); curr = vq->packed.desc_extra[curr].next; } } if (vq->indirect) { u32 len; /* Free the indirect table, if any, now that it's unmapped. 
*/ desc = state->indir_desc; if (!desc) return; if (vq->do_unmap) { len = vq->packed.desc_extra[id].len; for (i = 0; i < len / sizeof(struct vring_packed_desc); i++) vring_unmap_desc_packed(vq, &desc[i]); } kfree(desc); state->indir_desc = NULL; } else if (ctx) { *ctx = state->indir_desc; } } static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, u16 idx, bool used_wrap_counter) { bool avail, used; u16 flags; flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL)); used = !!(flags & (1 << VRING_PACKED_DESC_F_USED)); return avail == used && used == used_wrap_counter; } static bool more_used_packed(const struct vring_virtqueue *vq) { u16 last_used; u16 last_used_idx; bool used_wrap_counter; last_used_idx = READ_ONCE(vq->last_used_idx); last_used = packed_last_used(last_used_idx); used_wrap_counter = packed_used_wrap_counter(last_used_idx); return is_used_desc_packed(vq, last_used, used_wrap_counter); } static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, unsigned int *len, void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used, id, last_used_idx; bool used_wrap_counter; void *ret; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used_packed(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used elements after they have been exposed by host. */ virtio_rmb(vq->weak_barriers); last_used_idx = READ_ONCE(vq->last_used_idx); used_wrap_counter = packed_used_wrap_counter(last_used_idx); last_used = packed_last_used(last_used_idx); id = le16_to_cpu(vq->packed.vring.desc[last_used].id); *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); if (unlikely(id >= vq->packed.vring.num)) { BAD_RING(vq, "id %u out of range\n", id); return NULL; } if (unlikely(!vq->packed.desc_state[id].data)) { BAD_RING(vq, "id %u is not a head!\n", id); return NULL; } /* detach_buf_packed clears data, so grab it now. */ ret = vq->packed.desc_state[id].data; detach_buf_packed(vq, id, ctx); last_used += vq->packed.desc_state[id].num; if (unlikely(last_used >= vq->packed.vring.num)) { last_used -= vq->packed.vring.num; used_wrap_counter ^= 1; } last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); WRITE_ONCE(vq->last_used_idx, last_used); /* * If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) virtio_store_mb(vq->weak_barriers, &vq->packed.vring.driver->off_wrap, cpu_to_le16(vq->last_used_idx)); LAST_ADD_TIME_INVALID(vq); END_USE(vq); return ret; } static void virtqueue_disable_cb_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; /* * If device triggered an event already it won't trigger one again: * no need to disable. */ if (vq->event_triggered) return; vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow); } } static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); START_USE(vq); /* * We optimistically turn back on interrupts, then check if there was * more to do. 
*/ if (vq->event) { vq->packed.vring.driver->off_wrap = cpu_to_le16(vq->last_used_idx); /* * We need to update event offset and event wrap * counter first before updating event flags. */ virtio_wmb(vq->weak_barriers); } if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { vq->packed.event_flags_shadow = vq->event ? VRING_PACKED_EVENT_FLAG_DESC : VRING_PACKED_EVENT_FLAG_ENABLE; vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow); } END_USE(vq); return vq->last_used_idx; } static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap) { struct vring_virtqueue *vq = to_vvq(_vq); bool wrap_counter; u16 used_idx; wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); return is_used_desc_packed(vq, used_idx, wrap_counter); } static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 used_idx, wrap_counter, last_used_idx; u16 bufs; START_USE(vq); /* * We optimistically turn back on interrupts, then check if there was * more to do. */ if (vq->event) { /* TODO: tune this threshold */ bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; last_used_idx = READ_ONCE(vq->last_used_idx); wrap_counter = packed_used_wrap_counter(last_used_idx); used_idx = packed_last_used(last_used_idx) + bufs; if (used_idx >= vq->packed.vring.num) { used_idx -= vq->packed.vring.num; wrap_counter ^= 1; } vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); /* * We need to update event offset and event wrap * counter first before updating event flags. */ virtio_wmb(vq->weak_barriers); } if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { vq->packed.event_flags_shadow = vq->event ? VRING_PACKED_EVENT_FLAG_DESC : VRING_PACKED_EVENT_FLAG_ENABLE; vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow); } /* * We need to update event suppression structure first * before re-checking for more used buffers. */ virtio_mb(vq->weak_barriers); last_used_idx = READ_ONCE(vq->last_used_idx); wrap_counter = packed_used_wrap_counter(last_used_idx); used_idx = packed_last_used(last_used_idx); if (is_used_desc_packed(vq, used_idx, wrap_counter)) { END_USE(vq); return false; } END_USE(vq); return true; } static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->packed.vring.num; i++) { if (!vq->packed.desc_state[i].data) continue; /* detach_buf clears data, so grab it now. */ buf = vq->packed.desc_state[i].data; detach_buf_packed(vq, i, NULL); END_USE(vq); return buf; } /* That should have freed everything. 
*/ BUG_ON(vq->vq.num_free != vq->packed.vring.num); END_USE(vq); return NULL; } static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num) { struct vring_desc_extra *desc_extra; unsigned int i; desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra), GFP_KERNEL); if (!desc_extra) return NULL; memset(desc_extra, 0, num * sizeof(struct vring_desc_extra)); for (i = 0; i < num - 1; i++) desc_extra[i].next = i + 1; return desc_extra; } static void vring_free_packed(struct vring_virtqueue_packed *vring_packed, struct virtio_device *vdev, struct device *dma_dev) { if (vring_packed->vring.desc) vring_free_queue(vdev, vring_packed->ring_size_in_bytes, vring_packed->vring.desc, vring_packed->ring_dma_addr, dma_dev); if (vring_packed->vring.driver) vring_free_queue(vdev, vring_packed->event_size_in_bytes, vring_packed->vring.driver, vring_packed->driver_event_dma_addr, dma_dev); if (vring_packed->vring.device) vring_free_queue(vdev, vring_packed->event_size_in_bytes, vring_packed->vring.device, vring_packed->device_event_dma_addr, dma_dev); kfree(vring_packed->desc_state); kfree(vring_packed->desc_extra); } static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed, struct virtio_device *vdev, u32 num, struct device *dma_dev) { struct vring_packed_desc *ring; struct vring_packed_desc_event *driver, *device; dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr; size_t ring_size_in_bytes, event_size_in_bytes; ring_size_in_bytes = num * sizeof(struct vring_packed_desc); ring = vring_alloc_queue(vdev, ring_size_in_bytes, &ring_dma_addr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, dma_dev); if (!ring) goto err; vring_packed->vring.desc = ring; vring_packed->ring_dma_addr = ring_dma_addr; vring_packed->ring_size_in_bytes = ring_size_in_bytes; event_size_in_bytes = sizeof(struct vring_packed_desc_event); driver = vring_alloc_queue(vdev, event_size_in_bytes, &driver_event_dma_addr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, dma_dev); if (!driver) goto err; vring_packed->vring.driver = driver; vring_packed->event_size_in_bytes = event_size_in_bytes; vring_packed->driver_event_dma_addr = driver_event_dma_addr; device = vring_alloc_queue(vdev, event_size_in_bytes, &device_event_dma_addr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, dma_dev); if (!device) goto err; vring_packed->vring.device = device; vring_packed->device_event_dma_addr = device_event_dma_addr; vring_packed->vring.num = num; return 0; err: vring_free_packed(vring_packed, vdev, dma_dev); return -ENOMEM; } static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed) { struct vring_desc_state_packed *state; struct vring_desc_extra *extra; u32 num = vring_packed->vring.num; state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL); if (!state) goto err_desc_state; memset(state, 0, num * sizeof(struct vring_desc_state_packed)); extra = vring_alloc_desc_extra(num); if (!extra) goto err_desc_extra; vring_packed->desc_state = state; vring_packed->desc_extra = extra; return 0; err_desc_extra: kfree(state); err_desc_state: return -ENOMEM; } static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed, bool callback) { vring_packed->next_avail_idx = 0; vring_packed->avail_wrap_counter = 1; vring_packed->event_flags_shadow = 0; vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; /* No callback? Tell other side not to bother us. 
*/ if (!callback) { vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; vring_packed->vring.driver->flags = cpu_to_le16(vring_packed->event_flags_shadow); } } static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq, struct vring_virtqueue_packed *vring_packed) { vq->packed = *vring_packed; /* Put everything in free lists. */ vq->free_head = 0; } static void virtqueue_reinit_packed(struct vring_virtqueue *vq) { memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes); memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes); /* we need to reset the desc.flags. For more, see is_used_desc_packed() */ memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes); virtqueue_init(vq, vq->packed.vring.num); virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); } static struct virtqueue *vring_create_virtqueue_packed( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { struct vring_virtqueue_packed vring_packed = {}; struct vring_virtqueue *vq; int err; if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) goto err_ring; vq = kmalloc(sizeof(*vq), GFP_KERNEL); if (!vq) goto err_vq; vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.index = index; vq->vq.reset = false; vq->we_own_ring = true; vq->notify = notify; vq->weak_barriers = weak_barriers; #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION vq->broken = true; #else vq->broken = false; #endif vq->packed_ring = true; vq->dma_dev = dma_dev; vq->use_dma_api = vring_use_dma_api(vdev); vq->premapped = false; vq->do_unmap = vq->use_dma_api; vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) vq->weak_barriers = false; err = vring_alloc_state_extra_packed(&vring_packed); if (err) goto err_state_extra; virtqueue_vring_init_packed(&vring_packed, !!callback); virtqueue_init(vq, num); virtqueue_vring_attach_packed(vq, &vring_packed); spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); spin_unlock(&vdev->vqs_list_lock); return &vq->vq; err_state_extra: kfree(vq); err_vq: vring_free_packed(&vring_packed, vdev, dma_dev); err_ring: return NULL; } static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num) { struct vring_virtqueue_packed vring_packed = {}; struct vring_virtqueue *vq = to_vvq(_vq); struct virtio_device *vdev = _vq->vdev; int err; if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq))) goto err_ring; err = vring_alloc_state_extra_packed(&vring_packed); if (err) goto err_state_extra; vring_free(&vq->vq); virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback); virtqueue_init(vq, vring_packed.vring.num); virtqueue_vring_attach_packed(vq, &vring_packed); return 0; err_state_extra: vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq)); err_ring: virtqueue_reinit_packed(vq); return -ENOMEM; } static int virtqueue_disable_and_recycle(struct virtqueue *_vq, void (*recycle)(struct virtqueue *vq, void *buf)) { struct vring_virtqueue *vq = to_vvq(_vq); struct virtio_device *vdev = vq->vq.vdev; void *buf; int err; if (!vq->we_own_ring) return -EPERM; if (!vdev->config->disable_vq_and_reset) return -ENOENT; if (!vdev->config->enable_vq_after_reset) return -ENOENT; 
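	/*
	 * Quiesce the queue first; every buffer the device has not yet
	 * consumed is then handed back to the driver through @recycle so
	 * it can be re-posted once the queue is enabled again.
	 */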
err = vdev->config->disable_vq_and_reset(_vq); if (err) return err; while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL) recycle(_vq, buf); return 0; } static int virtqueue_enable_after_reset(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); struct virtio_device *vdev = vq->vq.vdev; if (vdev->config->enable_vq_after_reset(_vq)) return -EBUSY; return 0; } /* * Generic functions and exported symbols. */ static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, void *ctx, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, out_sgs, in_sgs, data, ctx, gfp) : virtqueue_add_split(_vq, sgs, total_sg, out_sgs, in_sgs, data, ctx, gfp); } /** * virtqueue_add_sgs - expose buffers to other end * @_vq: the struct virtqueue we're talking about. * @sgs: array of terminated scatterlists. * @out_sgs: the number of scatterlists readable by other side * @in_sgs: the number of scatterlists which are writable (after readable ones) * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_sgs(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { unsigned int i, total_sg = 0; /* Count them first. */ for (i = 0; i < out_sgs + in_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_sg++; } return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); /** * virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) * @num: the number of entries in @sg readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) * @num: the number of entries in @sg writable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** * virtqueue_add_inbuf_ctx - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) * @num: the number of entries in @sg writable by other side * @data: the token identifying the buffer. 
* @ctx: extra context for the token * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, void *ctx, gfp_t gfp) { return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx); /** * virtqueue_dma_dev - get the dma dev * @_vq: the struct virtqueue we're talking about. * * Returns the dma dev. That can been used for dma api. */ struct device *virtqueue_dma_dev(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->use_dma_api) return vring_dma_dev(vq); else return NULL; } EXPORT_SYMBOL_GPL(virtqueue_dma_dev); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @_vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) * virtqueue_notify(vq); * * This is sometimes useful because the virtqueue_kick_prepare() needs * to be serialized, but the actual virtqueue_notify() call does not. */ bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : virtqueue_kick_prepare_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); /** * virtqueue_notify - second half of split virtqueue_kick call. * @_vq: the struct virtqueue * * This does not need to be serialized. * * Returns false if host notify failed or queue is broken, otherwise true. */ bool virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (unlikely(vq->broken)) return false; /* Prod other side to tell it about changes. */ if (!vq->notify(_vq)) { vq->broken = true; return false; } return true; } EXPORT_SYMBOL_GPL(virtqueue_notify); /** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_* calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns false if kick failed, otherwise true. */ bool virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) return virtqueue_notify(vq); return true; } EXPORT_SYMBOL_GPL(virtqueue_kick); /** * virtqueue_get_buf_ctx - get the next used buffer * @_vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * @ctx: extra context for the token * * If the device wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_*(). */ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? 
virtqueue_get_buf_ctx_packed(_vq, len, ctx) : virtqueue_get_buf_ctx_split(_vq, len, ctx); } EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx); void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { return virtqueue_get_buf_ctx(_vq, len, NULL); } EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @_vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. * * Unlike other operations, this need not be serialized. */ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->packed_ring) virtqueue_disable_cb_packed(_vq); else virtqueue_disable_cb_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns current queue state * in an opaque unsigned value. This value should be later tested by * virtqueue_poll, to detect a possible race between the driver checking for * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->event_triggered) vq->event_triggered = false; return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : virtqueue_enable_cb_prepare_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); /** * virtqueue_poll - query pending used buffers * @_vq: the struct virtqueue we're talking about. * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). * * Returns "true" if there are pending used buffers in the queue. * * This does not need to be serialized. */ bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); if (unlikely(vq->broken)) return false; virtio_mb(vq->weak_barriers); return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : virtqueue_poll_split(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_poll); /** * virtqueue_enable_cb - restart callbacks after disable_cb. * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver * checking for more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb(struct virtqueue *_vq) { unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq); return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; * it returns "false" if there are many pending buffers in the queue, * to detect a possible race between the driver checking for more work, * and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->event_triggered) vq->event_triggered = false; return vq->packed_ring ? 
virtqueue_enable_cb_delayed_packed(_vq) : virtqueue_enable_cb_delayed_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer * @_vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_*(). * This is not valid on an active queue; it is useful for device * shutdown or the reset queue. */ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : virtqueue_detach_unused_buf_split(_vq); } EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); static inline bool more_used(const struct vring_virtqueue *vq) { return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); } /** * vring_interrupt - notify a virtqueue on an interrupt * @irq: the IRQ number (ignored) * @_vq: the struct virtqueue to notify * * Calls the callback function of @_vq to process the virtqueue * notification. */ irqreturn_t vring_interrupt(int irq, void *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!more_used(vq)) { pr_debug("virtqueue interrupt with no work for %p\n", vq); return IRQ_NONE; } if (unlikely(vq->broken)) { #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION dev_warn_once(&vq->vq.vdev->dev, "virtio vring IRQ raised before DRIVER_OK"); return IRQ_NONE; #else return IRQ_HANDLED; #endif } /* Just a hint for performance: so it's ok that this can be racy! */ if (vq->event) vq->event_triggered = true; pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); if (vq->vq.callback) vq->vq.callback(&vq->vq); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(vring_interrupt); /* Only available for split ring */ static struct virtqueue *__vring_new_virtqueue(unsigned int index, struct vring_virtqueue_split *vring_split, struct virtio_device *vdev, bool weak_barriers, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { struct vring_virtqueue *vq; int err; if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) return NULL; vq = kmalloc(sizeof(*vq), GFP_KERNEL); if (!vq) return NULL; vq->packed_ring = false; vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.index = index; vq->vq.reset = false; vq->we_own_ring = false; vq->notify = notify; vq->weak_barriers = weak_barriers; #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION vq->broken = true; #else vq->broken = false; #endif vq->dma_dev = dma_dev; vq->use_dma_api = vring_use_dma_api(vdev); vq->premapped = false; vq->do_unmap = vq->use_dma_api; vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) vq->weak_barriers = false; err = vring_alloc_state_extra_split(vring_split); if (err) { kfree(vq); return NULL; } virtqueue_vring_init_split(vring_split, vq); virtqueue_init(vq, vring_split->vring.num); virtqueue_vring_attach_split(vq, vring_split); spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); spin_unlock(&vdev->vqs_list_lock); return &vq->vq; } struct virtqueue *vring_create_virtqueue( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) return 
vring_create_virtqueue_packed(index, num, vring_align, vdev, weak_barriers, may_reduce_num, context, notify, callback, name, vdev->dev.parent); return vring_create_virtqueue_split(index, num, vring_align, vdev, weak_barriers, may_reduce_num, context, notify, callback, name, vdev->dev.parent); } EXPORT_SYMBOL_GPL(vring_create_virtqueue); struct virtqueue *vring_create_virtqueue_dma( unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name, struct device *dma_dev) { if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) return vring_create_virtqueue_packed(index, num, vring_align, vdev, weak_barriers, may_reduce_num, context, notify, callback, name, dma_dev); return vring_create_virtqueue_split(index, num, vring_align, vdev, weak_barriers, may_reduce_num, context, notify, callback, name, dma_dev); } EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma); /** * virtqueue_resize - resize the vring of vq * @_vq: the struct virtqueue we're talking about. * @num: new ring num * @recycle: callback to recycle unused buffers * * When it is really necessary to create a new vring, it will set the current vq * into the reset state. Then call the passed callback to recycle the buffer * that is no longer used. Only after the new vring is successfully created, the * old vring will be released. * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error. * 0: success. * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size. * vq can still work normally * -EBUSY: Failed to sync with device, vq may not work properly * -ENOENT: Transport or device not supported * -E2BIG/-EINVAL: num error * -EPERM: Operation not permitted * */ int virtqueue_resize(struct virtqueue *_vq, u32 num, void (*recycle)(struct virtqueue *vq, void *buf)) { struct vring_virtqueue *vq = to_vvq(_vq); int err; if (num > vq->vq.num_max) return -E2BIG; if (!num) return -EINVAL; if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num) return 0; err = virtqueue_disable_and_recycle(_vq, recycle); if (err) return err; if (vq->packed_ring) err = virtqueue_resize_packed(_vq, num); else err = virtqueue_resize_split(_vq, num); return virtqueue_enable_after_reset(_vq); } EXPORT_SYMBOL_GPL(virtqueue_resize); /** * virtqueue_set_dma_premapped - set the vring premapped mode * @_vq: the struct virtqueue we're talking about. * * Enable the premapped mode of the vq. * * The vring in premapped mode does not do dma internally, so the driver must * do dma mapping in advance. The driver must pass the dma_address through * dma_address of scatterlist. When the driver got a used buffer from * the vring, it has to unmap the dma address. * * This function must be called immediately after creating the vq, or after vq * reset, and before adding any buffers to it. * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error. * 0: success. * -EINVAL: too late to enable premapped mode, the vq already contains buffers. */ int virtqueue_set_dma_premapped(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u32 num; START_USE(vq); num = vq->packed_ring ? 
vq->packed.vring.num : vq->split.vring.num; if (num != vq->vq.num_free) { END_USE(vq); return -EINVAL; } vq->premapped = true; vq->do_unmap = false; END_USE(vq); return 0; } EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped); /** * virtqueue_reset - detach and recycle all unused buffers * @_vq: the struct virtqueue we're talking about. * @recycle: callback to recycle unused buffers * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error. * 0: success. * -EBUSY: Failed to sync with device, vq may not work properly * -ENOENT: Transport or device not supported * -EPERM: Operation not permitted */ int virtqueue_reset(struct virtqueue *_vq, void (*recycle)(struct virtqueue *vq, void *buf)) { struct vring_virtqueue *vq = to_vvq(_vq); int err; err = virtqueue_disable_and_recycle(_vq, recycle); if (err) return err; if (vq->packed_ring) virtqueue_reinit_packed(vq); else virtqueue_reinit_split(vq); return virtqueue_enable_after_reset(_vq); } EXPORT_SYMBOL_GPL(virtqueue_reset); /* Only available for split ring */ struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, bool context, void *pages, bool (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), const char *name) { struct vring_virtqueue_split vring_split = {}; if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) return NULL; vring_init(&vring_split.vring, num, pages, vring_align); return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, context, notify, callback, name, vdev->dev.parent); } EXPORT_SYMBOL_GPL(vring_new_virtqueue); static void vring_free(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (vq->we_own_ring) { if (vq->packed_ring) { vring_free_queue(vq->vq.vdev, vq->packed.ring_size_in_bytes, vq->packed.vring.desc, vq->packed.ring_dma_addr, vring_dma_dev(vq)); vring_free_queue(vq->vq.vdev, vq->packed.event_size_in_bytes, vq->packed.vring.driver, vq->packed.driver_event_dma_addr, vring_dma_dev(vq)); vring_free_queue(vq->vq.vdev, vq->packed.event_size_in_bytes, vq->packed.vring.device, vq->packed.device_event_dma_addr, vring_dma_dev(vq)); kfree(vq->packed.desc_state); kfree(vq->packed.desc_extra); } else { vring_free_queue(vq->vq.vdev, vq->split.queue_size_in_bytes, vq->split.vring.desc, vq->split.queue_dma_addr, vring_dma_dev(vq)); } } if (!vq->packed_ring) { kfree(vq->split.desc_state); kfree(vq->split.desc_extra); } } void vring_del_virtqueue(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); spin_lock(&vq->vq.vdev->vqs_list_lock); list_del(&_vq->list); spin_unlock(&vq->vq.vdev->vqs_list_lock); vring_free(_vq); kfree(vq); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); u32 vring_notification_data(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 next; if (vq->packed_ring) next = (vq->packed.next_avail_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) | vq->packed.avail_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR; else next = vq->split.avail_idx_shadow; return next << 16 | _vq->index; } EXPORT_SYMBOL_GPL(vring_notification_data); /* Manipulates transport-specific feature bits. 
*/ void vring_transport_features(struct virtio_device *vdev) { unsigned int i; for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { case VIRTIO_RING_F_INDIRECT_DESC: break; case VIRTIO_RING_F_EVENT_IDX: break; case VIRTIO_F_VERSION_1: break; case VIRTIO_F_ACCESS_PLATFORM: break; case VIRTIO_F_RING_PACKED: break; case VIRTIO_F_ORDER_PLATFORM: break; case VIRTIO_F_NOTIFICATION_DATA: break; default: /* We don't understand this bit. */ __virtio_clear_bit(vdev, i); } } } EXPORT_SYMBOL_GPL(vring_transport_features); /** * virtqueue_get_vring_size - return the size of the virtqueue's vring * @_vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. */ unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; } EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); /* * This function should only be called by the core, not directly by the driver. */ void __virtqueue_break(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ WRITE_ONCE(vq->broken, true); } EXPORT_SYMBOL_GPL(__virtqueue_break); /* * This function should only be called by the core, not directly by the driver. */ void __virtqueue_unbreak(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ WRITE_ONCE(vq->broken, false); } EXPORT_SYMBOL_GPL(__virtqueue_unbreak); bool virtqueue_is_broken(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); return READ_ONCE(vq->broken); } EXPORT_SYMBOL_GPL(virtqueue_is_broken); /* * This should prevent the device from being used, allowing drivers to * recover. You may need to grab appropriate locks to flush. */ void virtio_break_device(struct virtio_device *dev) { struct virtqueue *_vq; spin_lock(&dev->vqs_list_lock); list_for_each_entry(_vq, &dev->vqs, list) { struct vring_virtqueue *vq = to_vvq(_vq); /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ WRITE_ONCE(vq->broken, true); } spin_unlock(&dev->vqs_list_lock); } EXPORT_SYMBOL_GPL(virtio_break_device); /* * This should allow the device to be used by the driver. You may * need to grab appropriate locks to flush the write to * vq->broken. This should only be used in some specific case e.g * (probing and restoring). This function should only be called by the * core, not directly by the driver. */ void __virtio_unbreak_device(struct virtio_device *dev) { struct virtqueue *_vq; spin_lock(&dev->vqs_list_lock); list_for_each_entry(_vq, &dev->vqs, list) { struct vring_virtqueue *vq = to_vvq(_vq); /* Pairs with READ_ONCE() in virtqueue_is_broken(). 
*/ WRITE_ONCE(vq->broken, false); } spin_unlock(&dev->vqs_list_lock); } EXPORT_SYMBOL_GPL(__virtio_unbreak_device); dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); BUG_ON(!vq->we_own_ring); if (vq->packed_ring) return vq->packed.ring_dma_addr; return vq->split.queue_dma_addr; } EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr); dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); BUG_ON(!vq->we_own_ring); if (vq->packed_ring) return vq->packed.driver_event_dma_addr; return vq->split.queue_dma_addr + ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); } EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr); dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq) { const struct vring_virtqueue *vq = to_vvq(_vq); BUG_ON(!vq->we_own_ring); if (vq->packed_ring) return vq->packed.device_event_dma_addr; return vq->split.queue_dma_addr + ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); } EXPORT_SYMBOL_GPL(virtqueue_get_used_addr); /* Only available for split ring */ const struct vring *virtqueue_get_vring(const struct virtqueue *vq) { return &to_vvq(vq)->split.vring; } EXPORT_SYMBOL_GPL(virtqueue_get_vring); /** * virtqueue_dma_map_single_attrs - map DMA for _vq * @_vq: the struct virtqueue we're talking about. * @ptr: the pointer of the buffer to do dma * @size: the size of the buffer to do dma * @dir: DMA direction * @attrs: DMA Attrs * * The caller calls this to do dma mapping in advance. The DMA address can be * passed to this _vq when it is in pre-mapped mode. * * return DMA address. Caller should check that by virtqueue_dma_mapping_error(). */ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) { kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir); return (dma_addr_t)virt_to_phys(ptr); } return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs); } EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs); /** * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq * @_vq: the struct virtqueue we're talking about. * @addr: the dma address to unmap * @size: the size of the buffer * @dir: DMA direction * @attrs: DMA Attrs * * Unmap the address that is mapped by the virtqueue_dma_map_* APIs. * */ void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) return; dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs); } EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs); /** * virtqueue_dma_mapping_error - check dma address * @_vq: the struct virtqueue we're talking about. * @addr: DMA address * * Returns 0 means dma valid. Other means invalid dma address. */ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr) { struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) return 0; return dma_mapping_error(vring_dma_dev(vq), addr); } EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error); /** * virtqueue_dma_need_sync - check a dma address needs sync * @_vq: the struct virtqueue we're talking about. 
* @addr: DMA address * * Check if the dma address mapped by the virtqueue_dma_map_* APIs needs to be * synchronized * * return bool */ bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr) { struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) return false; return dma_need_sync(vring_dma_dev(vq), addr); } EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync); /** * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu * @_vq: the struct virtqueue we're talking about. * @addr: DMA address * @offset: DMA address offset * @size: buf size for sync * @dir: DMA direction * * Before calling this function, use virtqueue_dma_need_sync() to confirm that * the DMA address really needs to be synchronized * */ void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr, unsigned long offset, size_t size, enum dma_data_direction dir) { struct vring_virtqueue *vq = to_vvq(_vq); struct device *dev = vring_dma_dev(vq); if (!vq->use_dma_api) return; dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); } EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu); /** * virtqueue_dma_sync_single_range_for_device - dma sync for device * @_vq: the struct virtqueue we're talking about. * @addr: DMA address * @offset: DMA address offset * @size: buf size for sync * @dir: DMA direction * * Before calling this function, use virtqueue_dma_need_sync() to confirm that * the DMA address really needs to be synchronized */ void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr, unsigned long offset, size_t size, enum dma_data_direction dir) { struct vring_virtqueue *vq = to_vvq(_vq); struct device *dev = vring_dma_dev(vq); if (!vq->use_dma_api) return; dma_sync_single_range_for_device(dev, addr, offset, size, dir); } EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device); MODULE_DESCRIPTION("Virtio ring implementation"); MODULE_LICENSE("GPL");
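Editor's note: the callback helpers exported above (virtqueue_disable_cb(), virtqueue_get_buf(), virtqueue_enable_cb() and the prepare/poll split) are meant to be combined by a driver into a race-free completion loop. Below is a minimal sketch of that pattern, not taken from this file; my_vq_cb() and my_handle_buf() are hypothetical driver functions.

#include <linux/virtio.h>

/* Hypothetical consumer of one completed buffer (details elided). */
static void my_handle_buf(void *buf, unsigned int len)
{
}

/*
 * Driver-side virtqueue callback combining the helpers above so that no
 * used buffer is missed between draining the ring and re-enabling callbacks.
 */
static void my_vq_cb(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	for (;;) {
		/* Suppress further callbacks while draining (best effort). */
		virtqueue_disable_cb(vq);

		while ((buf = virtqueue_get_buf(vq, &len)))
			my_handle_buf(buf, len);

		/*
		 * virtqueue_enable_cb() returns false if buffers became used
		 * in the meantime; drain again instead of waiting for an
		 * interrupt that may never come.
		 */
		if (virtqueue_enable_cb(vq))
			break;
	}
}

Drivers that must recheck outside a lock, or that defer processing to NAPI-style polling, can use virtqueue_enable_cb_prepare() plus virtqueue_poll() to perform the same enable-then-recheck sequence in two steps.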
// SPDX-License-Identifier: GPL-2.0 /* usb-urb.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file keeps functions for initializing and handling the * BULK and ISOC USB data transfers in a generic way. * Can be used for DVB-only and also, that's the plan, for * Hybrid USB devices (analog and DVB). */ #include "dvb_usb_common.h" /* URB stuff for streaming */ int usb_urb_reconfig(struct usb_data_stream *stream, struct usb_data_stream_properties *props); static void usb_urb_complete(struct urb *urb) { struct usb_data_stream *stream = urb->context; int ptype = usb_pipetype(urb->pipe); int i; u8 *b; dev_dbg_ratelimited(&stream->udev->dev, "%s: %s urb completed status=%d length=%d/%d pack_num=%d errors=%d\n", __func__, ptype == PIPE_ISOCHRONOUS ?
"isoc" : "bulk", urb->status, urb->actual_length, urb->transfer_buffer_length, urb->number_of_packets, urb->error_count); switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dev_dbg_ratelimited(&stream->udev->dev, "%s: urb completion failed=%d\n", __func__, urb->status); break; } b = (u8 *) urb->transfer_buffer; switch (ptype) { case PIPE_ISOCHRONOUS: for (i = 0; i < urb->number_of_packets; i++) { if (urb->iso_frame_desc[i].status != 0) dev_dbg(&stream->udev->dev, "%s: iso frame descriptor has an error=%d\n", __func__, urb->iso_frame_desc[i].status); else if (urb->iso_frame_desc[i].actual_length > 0) stream->complete(stream, b + urb->iso_frame_desc[i].offset, urb->iso_frame_desc[i].actual_length); urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } break; case PIPE_BULK: if (urb->actual_length > 0) stream->complete(stream, b, urb->actual_length); break; default: dev_err(&stream->udev->dev, "%s: unknown endpoint type in completion handler\n", KBUILD_MODNAME); return; } usb_submit_urb(urb, GFP_ATOMIC); } int usb_urb_killv2(struct usb_data_stream *stream) { int i; for (i = 0; i < stream->urbs_submitted; i++) { dev_dbg(&stream->udev->dev, "%s: kill urb=%d\n", __func__, i); /* stop the URB */ usb_kill_urb(stream->urb_list[i]); } stream->urbs_submitted = 0; return 0; } int usb_urb_submitv2(struct usb_data_stream *stream, struct usb_data_stream_properties *props) { int i, ret; if (props) { ret = usb_urb_reconfig(stream, props); if (ret < 0) return ret; } for (i = 0; i < stream->urbs_initialized; i++) { dev_dbg(&stream->udev->dev, "%s: submit urb=%d\n", __func__, i); ret = usb_submit_urb(stream->urb_list[i], GFP_ATOMIC); if (ret) { dev_err(&stream->udev->dev, "%s: could not submit urb no. 
%d - get them all back\n", KBUILD_MODNAME, i); usb_urb_killv2(stream); return ret; } stream->urbs_submitted++; } return 0; } static int usb_urb_free_urbs(struct usb_data_stream *stream) { int i; usb_urb_killv2(stream); for (i = stream->urbs_initialized - 1; i >= 0; i--) { if (stream->urb_list[i]) { dev_dbg(&stream->udev->dev, "%s: free urb=%d\n", __func__, i); /* free the URBs */ usb_free_urb(stream->urb_list[i]); } } stream->urbs_initialized = 0; return 0; } static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream) { int i, j; /* allocate the URBs */ for (i = 0; i < stream->props.count; i++) { dev_dbg(&stream->udev->dev, "%s: alloc urb=%d\n", __func__, i); stream->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC); if (!stream->urb_list[i]) { dev_dbg(&stream->udev->dev, "%s: failed\n", __func__); for (j = 0; j < i; j++) usb_free_urb(stream->urb_list[j]); return -ENOMEM; } usb_fill_bulk_urb(stream->urb_list[i], stream->udev, usb_rcvbulkpipe(stream->udev, stream->props.endpoint), stream->buf_list[i], stream->props.u.bulk.buffersize, usb_urb_complete, stream); stream->urbs_initialized++; } return 0; } static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream) { int i, j; /* allocate the URBs */ for (i = 0; i < stream->props.count; i++) { struct urb *urb; int frame_offset = 0; dev_dbg(&stream->udev->dev, "%s: alloc urb=%d\n", __func__, i); stream->urb_list[i] = usb_alloc_urb( stream->props.u.isoc.framesperurb, GFP_ATOMIC); if (!stream->urb_list[i]) { dev_dbg(&stream->udev->dev, "%s: failed\n", __func__); for (j = 0; j < i; j++) usb_free_urb(stream->urb_list[j]); return -ENOMEM; } urb = stream->urb_list[i]; urb->dev = stream->udev; urb->context = stream; urb->complete = usb_urb_complete; urb->pipe = usb_rcvisocpipe(stream->udev, stream->props.endpoint); urb->transfer_flags = URB_ISO_ASAP; urb->interval = stream->props.u.isoc.interval; urb->number_of_packets = stream->props.u.isoc.framesperurb; urb->transfer_buffer_length = stream->props.u.isoc.framesize * stream->props.u.isoc.framesperurb; urb->transfer_buffer = stream->buf_list[i]; for (j = 0; j < stream->props.u.isoc.framesperurb; j++) { urb->iso_frame_desc[j].offset = frame_offset; urb->iso_frame_desc[j].length = stream->props.u.isoc.framesize; frame_offset += stream->props.u.isoc.framesize; } stream->urbs_initialized++; } return 0; } static int usb_free_stream_buffers(struct usb_data_stream *stream) { if (stream->state & USB_STATE_URB_BUF) { while (stream->buf_num) { stream->buf_num--; kfree(stream->buf_list[stream->buf_num]); } } stream->state &= ~USB_STATE_URB_BUF; return 0; } static int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num, unsigned long size) { stream->buf_num = 0; stream->buf_size = size; dev_dbg(&stream->udev->dev, "%s: all in all I will use %lu bytes for streaming\n", __func__, num * size); for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) { stream->buf_list[stream->buf_num] = kzalloc(size, GFP_ATOMIC); if (!stream->buf_list[stream->buf_num]) { dev_dbg(&stream->udev->dev, "%s: alloc buf=%d failed\n", __func__, stream->buf_num); usb_free_stream_buffers(stream); return -ENOMEM; } dev_dbg(&stream->udev->dev, "%s: alloc buf=%d %p (dma %llu)\n", __func__, stream->buf_num, stream->buf_list[stream->buf_num], (long long)stream->dma_addr[stream->buf_num]); stream->state |= USB_STATE_URB_BUF; } return 0; } int usb_urb_reconfig(struct usb_data_stream *stream, struct usb_data_stream_properties *props) { int buf_size; if (!props) return 0; /* check allocated buffers are large enough for the 
request */ if (props->type == USB_BULK) { buf_size = stream->props.u.bulk.buffersize; } else if (props->type == USB_ISOC) { buf_size = props->u.isoc.framesize * props->u.isoc.framesperurb; } else { dev_err(&stream->udev->dev, "%s: invalid endpoint type=%d\n", KBUILD_MODNAME, props->type); return -EINVAL; } if (stream->buf_num < props->count || stream->buf_size < buf_size) { dev_err(&stream->udev->dev, "%s: cannot reconfigure as allocated buffers are too small\n", KBUILD_MODNAME); return -EINVAL; } /* check if all fields are same */ if (stream->props.type == props->type && stream->props.count == props->count && stream->props.endpoint == props->endpoint) { if (props->type == USB_BULK && props->u.bulk.buffersize == stream->props.u.bulk.buffersize) return 0; else if (props->type == USB_ISOC && props->u.isoc.framesperurb == stream->props.u.isoc.framesperurb && props->u.isoc.framesize == stream->props.u.isoc.framesize && props->u.isoc.interval == stream->props.u.isoc.interval) return 0; } dev_dbg(&stream->udev->dev, "%s: re-alloc urbs\n", __func__); usb_urb_free_urbs(stream); memcpy(&stream->props, props, sizeof(*props)); if (props->type == USB_BULK) return usb_urb_alloc_bulk_urbs(stream); else if (props->type == USB_ISOC) return usb_urb_alloc_isoc_urbs(stream); return 0; } int usb_urb_initv2(struct usb_data_stream *stream, const struct usb_data_stream_properties *props) { int ret; if (!stream || !props) return -EINVAL; memcpy(&stream->props, props, sizeof(*props)); if (!stream->complete) { dev_err(&stream->udev->dev, "%s: there is no data callback - this doesn't make sense\n", KBUILD_MODNAME); return -EINVAL; } switch (stream->props.type) { case USB_BULK: ret = usb_alloc_stream_buffers(stream, stream->props.count, stream->props.u.bulk.buffersize); if (ret < 0) return ret; return usb_urb_alloc_bulk_urbs(stream); case USB_ISOC: ret = usb_alloc_stream_buffers(stream, stream->props.count, stream->props.u.isoc.framesize * stream->props.u.isoc.framesperurb); if (ret < 0) return ret; return usb_urb_alloc_isoc_urbs(stream); default: dev_err(&stream->udev->dev, "%s: unknown urb-type for data transfer\n", KBUILD_MODNAME); return -EINVAL; } } int usb_urb_exitv2(struct usb_data_stream *stream) { usb_urb_free_urbs(stream); usb_free_stream_buffers(stream); return 0; }
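Editor's note: to show how the helpers in this file fit together, here is a minimal sketch of a caller, assuming a dvb_usb-style driver that owns a struct usb_data_stream whose ->udev is already set. The names demo_complete(), demo_start_streaming() and demo_stop_streaming(), the endpoint address, URB count and buffer size are illustrative, and the completion-callback prototype is inferred from the stream->complete() call site above.

#include "dvb_usb_common.h"

/* Hypothetical completion handler: receives each chunk of stream data. */
static void demo_complete(struct usb_data_stream *stream, u8 *buf, int len)
{
	/* e.g. feed the MPEG-TS demux here */
}

/* Hypothetical start helper: allocate buffers/URBs and submit them. */
static int demo_start_streaming(struct usb_data_stream *stream)
{
	struct usb_data_stream_properties props = {
		.type = USB_BULK,
		.count = 8,			/* URBs kept in flight */
		.endpoint = 0x81,		/* example bulk IN endpoint */
		.u.bulk.buffersize = 8 * 512,
	};
	int ret;

	stream->complete = demo_complete;	/* required by usb_urb_initv2() */

	ret = usb_urb_initv2(stream, &props);
	if (ret < 0)
		return ret;

	return usb_urb_submitv2(stream, NULL);	/* NULL: keep current props */
}

/* Hypothetical stop helper. */
static void demo_stop_streaming(struct usb_data_stream *stream)
{
	usb_urb_killv2(stream);		/* stop in-flight URBs */
	usb_urb_exitv2(stream);		/* free URBs and buffers at teardown */
}

Passing a non-NULL properties pointer to usb_urb_submitv2() instead goes through usb_urb_reconfig(), which re-allocates the URBs only if the new configuration differs and the existing buffers are large enough.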
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS ioctl operations. * * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation. * * Written by Koji Sato. */ #include <linux/fs.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/capability.h> /* capable() */ #include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */ #include <linux/vmalloc.h> #include <linux/compat.h> /* compat_ptr() */ #include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */ #include <linux/buffer_head.h> #include <linux/fileattr.h> #include "nilfs.h" #include "segment.h" #include "bmap.h" #include "cpfile.h" #include "sufile.h" #include "dat.h" /** * nilfs_ioctl_wrap_copy - wrapping function of get/set metadata info * @nilfs: nilfs object * @argv: vector of arguments from userspace * @dir: set of direction flags * @dofunc: concrete function of get/set metadata info * * Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of * calling dofunc() function on the basis of @argv argument. * * Return Value: On success, 0 is returned and requested metadata info * is copied into userspace. On error, one of the following * negative error codes is returned. * * %-EINVAL - Invalid arguments from userspace. * * %-ENOMEM - Insufficient amount of memory available.
* * %-EFAULT - Failure during execution of requested operation. */ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, struct nilfs_argv *argv, int dir, ssize_t (*dofunc)(struct the_nilfs *, __u64 *, int, void *, size_t, size_t)) { void *buf; void __user *base = (void __user *)(unsigned long)argv->v_base; size_t maxmembs, total, n; ssize_t nr; int ret, i; __u64 pos, ppos; if (argv->v_nmembs == 0) return 0; if ((size_t)argv->v_size > PAGE_SIZE) return -EINVAL; /* * Reject pairs of a start item position (argv->v_index) and a * total count (argv->v_nmembs) which leads position 'pos' to * overflow by the increment at the end of the loop. */ if (argv->v_index > ~(__u64)0 - argv->v_nmembs) return -EINVAL; buf = (void *)get_zeroed_page(GFP_NOFS); if (unlikely(!buf)) return -ENOMEM; maxmembs = PAGE_SIZE / argv->v_size; ret = 0; total = 0; pos = argv->v_index; for (i = 0; i < argv->v_nmembs; i += n) { n = (argv->v_nmembs - i < maxmembs) ? argv->v_nmembs - i : maxmembs; if ((dir & _IOC_WRITE) && copy_from_user(buf, base + argv->v_size * i, argv->v_size * n)) { ret = -EFAULT; break; } ppos = pos; nr = dofunc(nilfs, &pos, argv->v_flags, buf, argv->v_size, n); if (nr < 0) { ret = nr; break; } if ((dir & _IOC_READ) && copy_to_user(base + argv->v_size * i, buf, argv->v_size * nr)) { ret = -EFAULT; break; } total += nr; if ((size_t)nr < n) break; if (pos == ppos) pos += n; } argv->v_nmembs = total; free_pages((unsigned long)buf, 0); return ret; } /** * nilfs_fileattr_get - ioctl to support lsattr */ int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); fileattr_fill_flags(fa, NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE); return 0; } /** * nilfs_fileattr_set - ioctl to support chattr */ int nilfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); struct nilfs_transaction_info ti; unsigned int flags, oldflags; int ret; if (fileattr_has_fsx(fa)) return -EOPNOTSUPP; flags = nilfs_mask_flags(inode->i_mode, fa->flags); ret = nilfs_transaction_begin(inode->i_sb, &ti, 0); if (ret) return ret; oldflags = NILFS_I(inode)->i_flags & ~FS_FL_USER_MODIFIABLE; NILFS_I(inode)->i_flags = oldflags | (flags & FS_FL_USER_MODIFIABLE); nilfs_set_inode_flags(inode); inode_set_ctime_current(inode); if (IS_SYNC(inode)) nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_mark_inode_dirty(inode); return nilfs_transaction_commit(inode->i_sb); } /** * nilfs_ioctl_getversion - get info about a file's version (generation number) */ static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp) { return put_user(inode->i_generation, (int __user *)argp); } /** * nilfs_ioctl_change_cpmode - change checkpoint mode (checkpoint/snapshot) * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_change_cpmode() function changes mode of * given checkpoint between checkpoint and snapshot state. This ioctl * is used in chcp and mkcp utilities. * * Return Value: On success, 0 is returned and mode of a checkpoint is * changed. On error, one of the following negative error codes * is returned. * * %-EPERM - Operation not permitted. * * %-EFAULT - Failure during checkpoint mode changing. 
*/ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_transaction_info ti; struct nilfs_cpmode cpmode; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(filp); if (ret) return ret; ret = -EFAULT; if (copy_from_user(&cpmode, argp, sizeof(cpmode))) goto out; mutex_lock(&nilfs->ns_snapshot_mount_mutex); nilfs_transaction_begin(inode->i_sb, &ti, 0); ret = nilfs_cpfile_change_cpmode( nilfs->ns_cpfile, cpmode.cm_cno, cpmode.cm_mode); if (unlikely(ret < 0)) nilfs_transaction_abort(inode->i_sb); else nilfs_transaction_commit(inode->i_sb); /* never fails */ mutex_unlock(&nilfs->ns_snapshot_mount_mutex); out: mnt_drop_write_file(filp); return ret; } /** * nilfs_ioctl_delete_checkpoint - remove checkpoint * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_delete_checkpoint() function removes * checkpoint from NILFS2 file system. This ioctl is used in rmcp * utility. * * Return Value: On success, 0 is returned and a checkpoint is * removed. On error, one of the following negative error codes * is returned. * * %-EPERM - Operation not permitted. * * %-EFAULT - Failure during checkpoint removing. */ static int nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_transaction_info ti; __u64 cno; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(filp); if (ret) return ret; ret = -EFAULT; if (copy_from_user(&cno, argp, sizeof(cno))) goto out; nilfs_transaction_begin(inode->i_sb, &ti, 0); ret = nilfs_cpfile_delete_checkpoint(nilfs->ns_cpfile, cno); if (unlikely(ret < 0)) nilfs_transaction_abort(inode->i_sb); else nilfs_transaction_commit(inode->i_sb); /* never fails */ out: mnt_drop_write_file(filp); return ret; } /** * nilfs_ioctl_do_get_cpinfo - callback method getting info about checkpoints * @nilfs: nilfs object * @posp: pointer on array of checkpoint's numbers * @flags: checkpoint mode (checkpoint or snapshot) * @buf: buffer for storing checkponts' info * @size: size in bytes of one checkpoint info item in array * @nmembs: number of checkpoints in array (numbers and infos) * * Description: nilfs_ioctl_do_get_cpinfo() function returns info about * requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in * lscp utility and by nilfs_cleanerd daemon. * * Return value: count of nilfs_cpinfo structures in output buffer. */ static ssize_t nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_cpfile_get_cpinfo(nilfs->ns_cpfile, posp, flags, buf, size, nmembs); up_read(&nilfs->ns_segctor_sem); return ret; } /** * nilfs_ioctl_get_cpstat - get checkpoints statistics * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_get_cpstat() returns information about checkpoints. * The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities * and by nilfs_cleanerd daemon. * * Return Value: On success, 0 is returned, and checkpoints information is * copied into userspace pointer @argp. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. 
* * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during getting checkpoints statistics. */ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_cpstat cpstat; int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); up_read(&nilfs->ns_segctor_sem); if (ret < 0) return ret; if (copy_to_user(argp, &cpstat, sizeof(cpstat))) ret = -EFAULT; return ret; } /** * nilfs_ioctl_do_get_suinfo - callback method getting segment usage info * @nilfs: nilfs object * @posp: pointer on array of segment numbers * @flags: *not used* * @buf: buffer for storing suinfo array * @size: size in bytes of one suinfo item in array * @nmembs: count of segment numbers and suinfos in array * * Description: nilfs_ioctl_do_get_suinfo() function returns segment usage * info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used * in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon. * * Return value: count of nilfs_suinfo structures in output buffer. */ static ssize_t nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_sufile_get_suinfo(nilfs->ns_sufile, *posp, buf, size, nmembs); up_read(&nilfs->ns_segctor_sem); return ret; } /** * nilfs_ioctl_get_sustat - get segment usage statistics * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_get_sustat() returns segment usage statistics. * The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities * and by nilfs_cleanerd daemon. * * Return Value: On success, 0 is returned, and segment usage information is * copied into userspace pointer @argp. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during getting segment usage statistics. */ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_sustat sustat; int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat); up_read(&nilfs->ns_segctor_sem); if (ret < 0) return ret; if (copy_to_user(argp, &sustat, sizeof(sustat))) ret = -EFAULT; return ret; } /** * nilfs_ioctl_do_get_vinfo - callback method getting virtual blocks info * @nilfs: nilfs object * @posp: *not used* * @flags: *not used* * @buf: buffer for storing array of nilfs_vinfo structures * @size: size in bytes of one vinfo item in array * @nmembs: count of vinfos in array * * Description: nilfs_ioctl_do_get_vinfo() function returns information * on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used * by nilfs_cleanerd daemon. * * Return value: count of nilfs_vinfo structures in output buffer. 
*/ static ssize_t nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_dat_get_vinfo(nilfs->ns_dat, buf, size, nmembs); up_read(&nilfs->ns_segctor_sem); return ret; } /** * nilfs_ioctl_do_get_bdescs - callback method getting disk block descriptors * @nilfs: nilfs object * @posp: *not used* * @flags: *not used* * @buf: buffer for storing array of nilfs_bdesc structures * @size: size in bytes of one bdesc item in array * @nmembs: count of bdescs in array * * Description: nilfs_ioctl_do_get_bdescs() function returns information * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl * is used by nilfs_cleanerd daemon. * * Return value: count of nilfs_bdescs structures in output buffer. */ static ssize_t nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; struct nilfs_bdesc *bdescs = buf; int ret, i; down_read(&nilfs->ns_segctor_sem); for (i = 0; i < nmembs; i++) { ret = nilfs_bmap_lookup_at_level(bmap, bdescs[i].bd_offset, bdescs[i].bd_level + 1, &bdescs[i].bd_blocknr); if (ret < 0) { if (ret != -ENOENT) { up_read(&nilfs->ns_segctor_sem); return ret; } bdescs[i].bd_blocknr = 0; } } up_read(&nilfs->ns_segctor_sem); return nmembs; } /** * nilfs_ioctl_get_bdescs - get disk block descriptors * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_do_get_bdescs() function returns information * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl * is used by nilfs_cleanerd daemon. * * Return Value: On success, 0 is returned, and disk block descriptors are * copied into userspace pointer @argp. On error, one of the following * negative error codes is returned. * * %-EINVAL - Invalid arguments from userspace. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during getting disk block descriptors. */ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_argv argv; int ret; if (copy_from_user(&argv, argp, sizeof(argv))) return -EFAULT; if (argv.v_size != sizeof(struct nilfs_bdesc)) return -EINVAL; ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), nilfs_ioctl_do_get_bdescs); if (ret < 0) return ret; if (copy_to_user(argp, &argv, sizeof(argv))) ret = -EFAULT; return ret; } /** * nilfs_ioctl_move_inode_block - prepare data/node block for moving by GC * @inode: inode object * @vdesc: descriptor of virtual block number * @buffers: list of moving buffers * * Description: nilfs_ioctl_move_inode_block() function registers data/node * buffer in the GC pagecache and submit read request. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - Requested block doesn't exist. * * %-EEXIST - Blocks conflict is detected. 
*/ static int nilfs_ioctl_move_inode_block(struct inode *inode, struct nilfs_vdesc *vdesc, struct list_head *buffers) { struct buffer_head *bh; int ret; if (vdesc->vd_flags == 0) ret = nilfs_gccache_submit_read_data( inode, vdesc->vd_offset, vdesc->vd_blocknr, vdesc->vd_vblocknr, &bh); else ret = nilfs_gccache_submit_read_node( inode, vdesc->vd_blocknr, vdesc->vd_vblocknr, &bh); if (unlikely(ret < 0)) { if (ret == -ENOENT) nilfs_crit(inode->i_sb, "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", __func__, vdesc->vd_flags ? "node" : "data", (unsigned long long)vdesc->vd_ino, (unsigned long long)vdesc->vd_cno, (unsigned long long)vdesc->vd_offset, (unsigned long long)vdesc->vd_blocknr, (unsigned long long)vdesc->vd_vblocknr); return ret; } if (unlikely(!list_empty(&bh->b_assoc_buffers))) { nilfs_crit(inode->i_sb, "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", __func__, vdesc->vd_flags ? "node" : "data", (unsigned long long)vdesc->vd_ino, (unsigned long long)vdesc->vd_cno, (unsigned long long)vdesc->vd_offset, (unsigned long long)vdesc->vd_blocknr, (unsigned long long)vdesc->vd_vblocknr); brelse(bh); return -EEXIST; } list_add_tail(&bh->b_assoc_buffers, buffers); return 0; } /** * nilfs_ioctl_move_blocks - move valid inode's blocks during garbage collection * @sb: superblock object * @argv: vector of arguments from userspace * @buf: array of nilfs_vdesc structures * * Description: nilfs_ioctl_move_blocks() function reads valid data/node * blocks that garbage collector specified with the array of nilfs_vdesc * structures and stores them into page caches of GC inodes. * * Return Value: Number of processed nilfs_vdesc structures or * error code, otherwise. */ static int nilfs_ioctl_move_blocks(struct super_block *sb, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; struct the_nilfs *nilfs = sb->s_fs_info; struct inode *inode; struct nilfs_vdesc *vdesc; struct buffer_head *bh, *n; LIST_HEAD(buffers); ino_t ino; __u64 cno; int i, ret; for (i = 0, vdesc = buf; i < nmembs; ) { ino = vdesc->vd_ino; cno = vdesc->vd_cno; inode = nilfs_iget_for_gc(sb, ino, cno); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto failed; } if (list_empty(&NILFS_I(inode)->i_dirty)) { /* * Add the inode to GC inode list. Garbage Collection * is serialized and no two processes manipulate the * list simultaneously. */ igrab(inode); list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes); } do { ret = nilfs_ioctl_move_inode_block(inode, vdesc, &buffers); if (unlikely(ret < 0)) { iput(inode); goto failed; } vdesc++; } while (++i < nmembs && vdesc->vd_ino == ino && vdesc->vd_cno == cno); iput(inode); /* The inode still remains in GC inode list */ } list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) { ret = nilfs_gccache_wait_and_mark_dirty(bh); if (unlikely(ret < 0)) { WARN_ON(ret == -EEXIST); goto failed; } list_del_init(&bh->b_assoc_buffers); brelse(bh); } return nmembs; failed: list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) { list_del_init(&bh->b_assoc_buffers); brelse(bh); } return ret; } /** * nilfs_ioctl_delete_checkpoints - delete checkpoints * @nilfs: nilfs object * @argv: vector of arguments from userspace * @buf: array of periods of checkpoints numbers * * Description: nilfs_ioctl_delete_checkpoints() function deletes checkpoints * in the period from p_start to p_end, excluding p_end itself. The checkpoints * which have been already deleted are ignored. 
* * Return Value: Number of processed nilfs_period structures or * error code, otherwise. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EINVAL - invalid checkpoints. */ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; struct inode *cpfile = nilfs->ns_cpfile; struct nilfs_period *periods = buf; int ret, i; for (i = 0; i < nmembs; i++) { ret = nilfs_cpfile_delete_checkpoints( cpfile, periods[i].p_start, periods[i].p_end); if (ret < 0) return ret; } return nmembs; } /** * nilfs_ioctl_free_vblocknrs - free virtual block numbers * @nilfs: nilfs object * @argv: vector of arguments from userspace * @buf: array of virtual block numbers * * Description: nilfs_ioctl_free_vblocknrs() function frees * the virtual block numbers specified by @buf and @argv->v_nmembs. * * Return Value: Number of processed virtual block numbers or * error code, otherwise. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The virtual block number have not been allocated. */ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; int ret; ret = nilfs_dat_freev(nilfs->ns_dat, buf, nmembs); return (ret < 0) ? ret : nmembs; } /** * nilfs_ioctl_mark_blocks_dirty - mark blocks dirty * @nilfs: nilfs object * @argv: vector of arguments from userspace * @buf: array of block descriptors * * Description: nilfs_ioctl_mark_blocks_dirty() function marks * metadata file or data blocks as dirty. * * Return Value: Number of processed block descriptors or * error code, otherwise. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error * * %-ENOENT - the specified block does not exist (hole block) */ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; struct nilfs_bdesc *bdescs = buf; struct buffer_head *bh; int ret, i; for (i = 0; i < nmembs; i++) { /* XXX: use macro or inline func to check liveness */ ret = nilfs_bmap_lookup_at_level(bmap, bdescs[i].bd_offset, bdescs[i].bd_level + 1, &bdescs[i].bd_blocknr); if (ret < 0) { if (ret != -ENOENT) return ret; bdescs[i].bd_blocknr = 0; } if (bdescs[i].bd_blocknr != bdescs[i].bd_oblocknr) /* skip dead block */ continue; if (bdescs[i].bd_level == 0) { ret = nilfs_mdt_get_block(nilfs->ns_dat, bdescs[i].bd_offset, false, NULL, &bh); if (unlikely(ret)) { WARN_ON(ret == -ENOENT); return ret; } mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(nilfs->ns_dat); put_bh(bh); } else { ret = nilfs_bmap_mark(bmap, bdescs[i].bd_offset, bdescs[i].bd_level); if (ret < 0) { WARN_ON(ret == -ENOENT); return ret; } } } return nmembs; } int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, struct nilfs_argv *argv, void **kbufs) { const char *msg; int ret; ret = nilfs_ioctl_delete_checkpoints(nilfs, &argv[1], kbufs[1]); if (ret < 0) { /* * can safely abort because checkpoints can be removed * independently. */ msg = "cannot delete checkpoints"; goto failed; } ret = nilfs_ioctl_free_vblocknrs(nilfs, &argv[2], kbufs[2]); if (ret < 0) { /* * can safely abort because DAT file is updated atomically * using a copy-on-write technique. 
*/ msg = "cannot delete virtual blocks from DAT file"; goto failed; } ret = nilfs_ioctl_mark_blocks_dirty(nilfs, &argv[3], kbufs[3]); if (ret < 0) { /* * can safely abort because the operation is nondestructive. */ msg = "cannot mark copying blocks dirty"; goto failed; } return 0; failed: nilfs_err(nilfs->ns_sb, "error %d preparing GC: %s", ret, msg); return ret; } /** * nilfs_ioctl_clean_segments - clean segments * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_clean_segments() function makes garbage * collection operation in the environment of requested parameters * from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by * nilfs_cleanerd daemon. * * Return Value: On success, 0 is returned or error code, otherwise. */ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct nilfs_argv argv[5]; static const size_t argsz[5] = { sizeof(struct nilfs_vdesc), sizeof(struct nilfs_period), sizeof(__u64), sizeof(struct nilfs_bdesc), sizeof(__u64), }; void __user *base; void *kbufs[5]; struct the_nilfs *nilfs; size_t len, nsegs; int n, ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(filp); if (ret) return ret; ret = -EFAULT; if (copy_from_user(argv, argp, sizeof(argv))) goto out; ret = -EINVAL; nsegs = argv[4].v_nmembs; if (argv[4].v_size != argsz[4]) goto out; /* * argv[4] points to segment numbers this ioctl cleans. We * use kmalloc() for its buffer because the memory used for the * segment numbers is small enough. */ kbufs[4] = memdup_array_user((void __user *)(unsigned long)argv[4].v_base, nsegs, sizeof(__u64)); if (IS_ERR(kbufs[4])) { ret = PTR_ERR(kbufs[4]); goto out; } nilfs = inode->i_sb->s_fs_info; for (n = 0; n < 4; n++) { ret = -EINVAL; if (argv[n].v_size != argsz[n]) goto out_free; if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment) goto out_free; if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size) goto out_free; len = argv[n].v_size * argv[n].v_nmembs; base = (void __user *)(unsigned long)argv[n].v_base; if (len == 0) { kbufs[n] = NULL; continue; } kbufs[n] = vmalloc(len); if (!kbufs[n]) { ret = -ENOMEM; goto out_free; } if (copy_from_user(kbufs[n], base, len)) { ret = -EFAULT; vfree(kbufs[n]); goto out_free; } } /* * nilfs_ioctl_move_blocks() will call nilfs_iget_for_gc(), * which will operates an inode list without blocking. * To protect the list from concurrent operations, * nilfs_ioctl_move_blocks should be atomic operation. */ if (test_and_set_bit(THE_NILFS_GC_RUNNING, &nilfs->ns_flags)) { ret = -EBUSY; goto out_free; } ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]); if (ret < 0) { nilfs_err(inode->i_sb, "error %d preparing GC: cannot read source blocks", ret); } else { if (nilfs_sb_need_update(nilfs)) set_nilfs_discontinued(nilfs); ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); } nilfs_remove_all_gcinodes(nilfs); clear_nilfs_gc_running(nilfs); out_free: while (--n >= 0) vfree(kbufs[n]); kfree(kbufs[4]); out: mnt_drop_write_file(filp); return ret; } /** * nilfs_ioctl_sync - make a checkpoint * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_sync() function constructs a logical segment * for checkpointing. This function guarantees that all modified data * and metadata are written out to the device when it successfully * returned. 
* * Return Value: On success, 0 is returned. On errors, one of the following * negative error codes is returned. * * %-EROFS - Read only filesystem. * * %-EIO - I/O error * * %-ENOSPC - No space left on device (only in a panic state). * * %-ERESTARTSYS - Interrupted. * * %-ENOMEM - Insufficient memory available. * * %-EFAULT - Failure during execution of requested operation. */ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { __u64 cno; int ret; struct the_nilfs *nilfs; ret = nilfs_construct_segment(inode->i_sb); if (ret < 0) return ret; nilfs = inode->i_sb->s_fs_info; ret = nilfs_flush_device(nilfs); if (ret < 0) return ret; if (argp != NULL) { down_read(&nilfs->ns_segctor_sem); cno = nilfs->ns_cno - 1; up_read(&nilfs->ns_segctor_sem); if (copy_to_user(argp, &cno, sizeof(cno))) return -EFAULT; } return 0; } /** * nilfs_ioctl_resize - resize NILFS2 volume * @inode: inode object * @filp: file object * @argp: pointer on argument from userspace * * Return Value: On success, 0 is returned or error code, otherwise. */ static int nilfs_ioctl_resize(struct inode *inode, struct file *filp, void __user *argp) { __u64 newsize; int ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; ret = mnt_want_write_file(filp); if (ret) goto out; ret = -EFAULT; if (copy_from_user(&newsize, argp, sizeof(newsize))) goto out_drop_write; ret = nilfs_resize_fs(inode->i_sb, newsize); out_drop_write: mnt_drop_write_file(filp); out: return ret; } /** * nilfs_ioctl_trim_fs() - trim ioctl handle function * @inode: inode object * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_trim_fs is the FITRIM ioctl handle function. It * checks the arguments from userspace and calls nilfs_sufile_trim_fs, which * performs the actual trim operation. * * Return Value: On success, 0 is returned or negative error code, otherwise. */ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct fstrim_range range; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!bdev_max_discard_sectors(nilfs->ns_bdev)) return -EOPNOTSUPP; if (copy_from_user(&range, argp, sizeof(range))) return -EFAULT; range.minlen = max_t(u64, range.minlen, bdev_discard_granularity(nilfs->ns_bdev)); down_read(&nilfs->ns_segctor_sem); ret = nilfs_sufile_trim_fs(nilfs->ns_sufile, &range); up_read(&nilfs->ns_segctor_sem); if (ret < 0) return ret; if (copy_to_user(argp, &range, sizeof(range))) return -EFAULT; return 0; } /** * nilfs_ioctl_set_alloc_range - limit range of segments to be allocated * @inode: inode object * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_set_alloc_range() function defines lower limit * of segments in bytes and upper limit of segments in bytes. * The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility. * * Return Value: On success, 0 is returned or error code, otherwise.
*/ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; __u64 range[2]; __u64 minseg, maxseg; unsigned long segbytes; int ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; ret = -EFAULT; if (copy_from_user(range, argp, sizeof(__u64[2]))) goto out; ret = -ERANGE; if (range[1] > bdev_nr_bytes(inode->i_sb->s_bdev)) goto out; segbytes = nilfs->ns_blocks_per_segment * nilfs->ns_blocksize; minseg = range[0] + segbytes - 1; minseg = div64_ul(minseg, segbytes); if (range[1] < 4096) goto out; maxseg = NILFS_SB2_OFFSET_BYTES(range[1]); if (maxseg < segbytes) goto out; maxseg = div64_ul(maxseg, segbytes); maxseg--; ret = nilfs_sufile_set_alloc_range(nilfs->ns_sufile, minseg, maxseg); out: return ret; } /** * nilfs_ioctl_get_info - wrapping function of get metadata info * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * @membsz: size of an item in bytes * @dofunc: concrete function of getting metadata info * * Description: nilfs_ioctl_get_info() gets metadata info by means of * calling dofunc() function. * * Return Value: On success, 0 is returned and requested metadata info * is copied into userspace. On error, one of the following * negative error codes is returned. * * %-EINVAL - Invalid arguments from userspace. * * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during execution of requested operation. */ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp, size_t membsz, ssize_t (*dofunc)(struct the_nilfs *, __u64 *, int, void *, size_t, size_t)) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_argv argv; int ret; if (copy_from_user(&argv, argp, sizeof(argv))) return -EFAULT; if (argv.v_size < membsz) return -EINVAL; ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), dofunc); if (ret < 0) return ret; if (copy_to_user(argp, &argv, sizeof(argv))) ret = -EFAULT; return ret; } /** * nilfs_ioctl_set_suinfo - set segment usage info * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: Expects an array of nilfs_suinfo_update structures * encapsulated in nilfs_argv and updates the segment usage info * according to the flags in nilfs_suinfo_update. * * Return Value: On success, 0 is returned. On error, one of the * following negative error codes is returned. * * %-EPERM - Not enough permissions * * %-EFAULT - Error copying input data * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
* * %-EINVAL - Invalid values in input (segment number, flags or nblocks) */ static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_transaction_info ti; struct nilfs_argv argv; size_t len; void __user *base; void *kbuf; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(filp); if (ret) return ret; ret = -EFAULT; if (copy_from_user(&argv, argp, sizeof(argv))) goto out; ret = -EINVAL; if (argv.v_size < sizeof(struct nilfs_suinfo_update)) goto out; if (argv.v_nmembs > nilfs->ns_nsegments) goto out; if (argv.v_nmembs >= UINT_MAX / argv.v_size) goto out; len = argv.v_size * argv.v_nmembs; if (!len) { ret = 0; goto out; } base = (void __user *)(unsigned long)argv.v_base; kbuf = vmalloc(len); if (!kbuf) { ret = -ENOMEM; goto out; } if (copy_from_user(kbuf, base, len)) { ret = -EFAULT; goto out_free; } nilfs_transaction_begin(inode->i_sb, &ti, 0); ret = nilfs_sufile_set_suinfo(nilfs->ns_sufile, kbuf, argv.v_size, argv.v_nmembs); if (unlikely(ret < 0)) nilfs_transaction_abort(inode->i_sb); else nilfs_transaction_commit(inode->i_sb); /* never fails */ out_free: vfree(kbuf); out: mnt_drop_write_file(filp); return ret; } long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); void __user *argp = (void __user *)arg; switch (cmd) { case FS_IOC_GETVERSION: return nilfs_ioctl_getversion(inode, argp); case NILFS_IOCTL_CHANGE_CPMODE: return nilfs_ioctl_change_cpmode(inode, filp, cmd, argp); case NILFS_IOCTL_DELETE_CHECKPOINT: return nilfs_ioctl_delete_checkpoint(inode, filp, cmd, argp); case NILFS_IOCTL_GET_CPINFO: return nilfs_ioctl_get_info(inode, filp, cmd, argp, sizeof(struct nilfs_cpinfo), nilfs_ioctl_do_get_cpinfo); case NILFS_IOCTL_GET_CPSTAT: return nilfs_ioctl_get_cpstat(inode, filp, cmd, argp); case NILFS_IOCTL_GET_SUINFO: return nilfs_ioctl_get_info(inode, filp, cmd, argp, sizeof(struct nilfs_suinfo), nilfs_ioctl_do_get_suinfo); case NILFS_IOCTL_SET_SUINFO: return nilfs_ioctl_set_suinfo(inode, filp, cmd, argp); case NILFS_IOCTL_GET_SUSTAT: return nilfs_ioctl_get_sustat(inode, filp, cmd, argp); case NILFS_IOCTL_GET_VINFO: return nilfs_ioctl_get_info(inode, filp, cmd, argp, sizeof(struct nilfs_vinfo), nilfs_ioctl_do_get_vinfo); case NILFS_IOCTL_GET_BDESCS: return nilfs_ioctl_get_bdescs(inode, filp, cmd, argp); case NILFS_IOCTL_CLEAN_SEGMENTS: return nilfs_ioctl_clean_segments(inode, filp, cmd, argp); case NILFS_IOCTL_SYNC: return nilfs_ioctl_sync(inode, filp, cmd, argp); case NILFS_IOCTL_RESIZE: return nilfs_ioctl_resize(inode, filp, argp); case NILFS_IOCTL_SET_ALLOC_RANGE: return nilfs_ioctl_set_alloc_range(inode, argp); case FITRIM: return nilfs_ioctl_trim_fs(inode, argp); default: return -ENOTTY; } } #ifdef CONFIG_COMPAT long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; case NILFS_IOCTL_CHANGE_CPMODE: case NILFS_IOCTL_DELETE_CHECKPOINT: case NILFS_IOCTL_GET_CPINFO: case NILFS_IOCTL_GET_CPSTAT: case NILFS_IOCTL_GET_SUINFO: case NILFS_IOCTL_SET_SUINFO: case NILFS_IOCTL_GET_SUSTAT: case NILFS_IOCTL_GET_VINFO: case NILFS_IOCTL_GET_BDESCS: case NILFS_IOCTL_CLEAN_SEGMENTS: case NILFS_IOCTL_SYNC: case NILFS_IOCTL_RESIZE: case NILFS_IOCTL_SET_ALLOC_RANGE: case FITRIM: break; default: return -ENOIOCTLCMD; } return nilfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); } #endif
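All of the handlers above are reached from userspace through ioctl(2) on a file descriptor opened on the mounted filesystem. As a minimal sketch (not part of the kernel source above), the FITRIM path handled by nilfs_ioctl_trim_fs() could be exercised as follows; the mount point /mnt/nilfs is an assumed, hypothetical path and error handling is kept to a minimum:

/* Userspace sketch: issue FITRIM against a mounted NILFS2 volume.
 * Assumes /mnt/nilfs is the mount point (hypothetical path). */
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>   /* FITRIM, struct fstrim_range */

int main(void)
{
        struct fstrim_range range;
        int fd = open("/mnt/nilfs", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&range, 0, sizeof(range));
        range.start = 0;
        range.len = ULLONG_MAX; /* ask to trim the whole byte range */
        range.minlen = 0;       /* the handler raises this to the discard granularity */

        if (ioctl(fd, FITRIM, &range) < 0)
                perror("ioctl(FITRIM)");
        else
                printf("trimmed %llu bytes\n", (unsigned long long)range.len);

        close(fd);
        return 0;
}

On return the kernel copies the fstrim_range structure back with range.len updated; the NILFS-specific commands (for example NILFS_IOCTL_SYNC, which reports the latest checkpoint number) are issued the same way through their definitions in the NILFS2 uapi header.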
// SPDX-License-Identifier: GPL-2.0-or-later /* * Video capture interface for Linux version 2 * * A generic framework to process V4L2 ioctl commands. 
* * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1) * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2) */ #include <linux/compat.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/version.h> #include <linux/v4l2-subdev.h> #include <linux/videodev2.h> #include <media/media-device.h> /* for media_set_bus_info() */ #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> #include <media/v4l2-device.h> #include <media/videobuf2-v4l2.h> #include <media/v4l2-mc.h> #include <media/v4l2-mem2mem.h> #include <trace/events/v4l2.h> #define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls) struct std_descr { v4l2_std_id std; const char *descr; }; static const struct std_descr standards[] = { { V4L2_STD_NTSC, "NTSC" }, { V4L2_STD_NTSC_M, "NTSC-M" }, { V4L2_STD_NTSC_M_JP, "NTSC-M-JP" }, { V4L2_STD_NTSC_M_KR, "NTSC-M-KR" }, { V4L2_STD_NTSC_443, "NTSC-443" }, { V4L2_STD_PAL, "PAL" }, { V4L2_STD_PAL_BG, "PAL-BG" }, { V4L2_STD_PAL_B, "PAL-B" }, { V4L2_STD_PAL_B1, "PAL-B1" }, { V4L2_STD_PAL_G, "PAL-G" }, { V4L2_STD_PAL_H, "PAL-H" }, { V4L2_STD_PAL_I, "PAL-I" }, { V4L2_STD_PAL_DK, "PAL-DK" }, { V4L2_STD_PAL_D, "PAL-D" }, { V4L2_STD_PAL_D1, "PAL-D1" }, { V4L2_STD_PAL_K, "PAL-K" }, { V4L2_STD_PAL_M, "PAL-M" }, { V4L2_STD_PAL_N, "PAL-N" }, { V4L2_STD_PAL_Nc, "PAL-Nc" }, { V4L2_STD_PAL_60, "PAL-60" }, { V4L2_STD_SECAM, "SECAM" }, { V4L2_STD_SECAM_B, "SECAM-B" }, { V4L2_STD_SECAM_G, "SECAM-G" }, { V4L2_STD_SECAM_H, "SECAM-H" }, { V4L2_STD_SECAM_DK, "SECAM-DK" }, { V4L2_STD_SECAM_D, "SECAM-D" }, { V4L2_STD_SECAM_K, "SECAM-K" }, { V4L2_STD_SECAM_K1, "SECAM-K1" }, { V4L2_STD_SECAM_L, "SECAM-L" }, { V4L2_STD_SECAM_LC, "SECAM-Lc" }, { 0, "Unknown" } }; /* video4linux standard ID conversion to standard name */ const char *v4l2_norm_to_name(v4l2_std_id id) { u32 myid = id; int i; /* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle 64 bit comparisons. So, on that architecture, with some gcc variants, compilation fails. Currently, the max value is 30bit wide. */ BUG_ON(myid != id); for (i = 0; standards[i].std; i++) if (myid == standards[i].std) break; return standards[i].descr; } EXPORT_SYMBOL(v4l2_norm_to_name); /* Returns frame period for the given standard */ void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod) { if (id & V4L2_STD_525_60) { frameperiod->numerator = 1001; frameperiod->denominator = 30000; } else { frameperiod->numerator = 1; frameperiod->denominator = 25; } } EXPORT_SYMBOL(v4l2_video_std_frame_period); /* Fill in the fields of a v4l2_standard structure according to the 'id' and 'transmission' parameters. Returns negative on error. */ int v4l2_video_std_construct(struct v4l2_standard *vs, int id, const char *name) { vs->id = id; v4l2_video_std_frame_period(id, &vs->frameperiod); vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625; strscpy(vs->name, name, sizeof(vs->name)); return 0; } EXPORT_SYMBOL(v4l2_video_std_construct); /* Fill in the fields of a v4l2_standard structure according to the * 'id' and 'vs->index' parameters. Returns negative on error. */ int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id) { v4l2_std_id curr_id = 0; unsigned int index = vs->index, i, j = 0; const char *descr = ""; /* Return -ENODATA if the id for the current input or output is 0, meaning that it doesn't support this API. 
*/ if (id == 0) return -ENODATA; /* Return norm array in a canonical way */ for (i = 0; i <= index && id; i++) { /* last std value in the standards array is 0, so this while always ends there since (id & 0) == 0. */ while ((id & standards[j].std) != standards[j].std) j++; curr_id = standards[j].std; descr = standards[j].descr; j++; if (curr_id == 0) break; if (curr_id != V4L2_STD_PAL && curr_id != V4L2_STD_SECAM && curr_id != V4L2_STD_NTSC) id &= ~curr_id; } if (i <= index) return -EINVAL; v4l2_video_std_construct(vs, curr_id, descr); return 0; } /* ----------------------------------------------------------------- */ /* some arrays for pretty-printing debug messages of enum types */ const char *v4l2_field_names[] = { [V4L2_FIELD_ANY] = "any", [V4L2_FIELD_NONE] = "none", [V4L2_FIELD_TOP] = "top", [V4L2_FIELD_BOTTOM] = "bottom", [V4L2_FIELD_INTERLACED] = "interlaced", [V4L2_FIELD_SEQ_TB] = "seq-tb", [V4L2_FIELD_SEQ_BT] = "seq-bt", [V4L2_FIELD_ALTERNATE] = "alternate", [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb", [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt", }; EXPORT_SYMBOL(v4l2_field_names); const char *v4l2_type_names[] = { [0] = "0", [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "vid-cap", [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "vid-overlay", [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "vid-out", [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap", [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap", [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out", [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay", [V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane", [V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane", [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap", [V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out", [V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap", [V4L2_BUF_TYPE_META_OUTPUT] = "meta-out", }; EXPORT_SYMBOL(v4l2_type_names); static const char *v4l2_memory_names[] = { [V4L2_MEMORY_MMAP] = "mmap", [V4L2_MEMORY_USERPTR] = "userptr", [V4L2_MEMORY_OVERLAY] = "overlay", [V4L2_MEMORY_DMABUF] = "dmabuf", }; #define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? 
arr[a] : "unknown") /* ------------------------------------------------------------------ */ /* debug help functions */ static void v4l_print_querycap(const void *arg, bool write_only) { const struct v4l2_capability *p = arg; pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n", (int)sizeof(p->driver), p->driver, (int)sizeof(p->card), p->card, (int)sizeof(p->bus_info), p->bus_info, p->version, p->capabilities, p->device_caps); } static void v4l_print_enuminput(const void *arg, bool write_only) { const struct v4l2_input *p = arg; pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n", p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, p->tuner, (unsigned long long)p->std, p->status, p->capabilities); } static void v4l_print_enumoutput(const void *arg, bool write_only) { const struct v4l2_output *p = arg; pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n", p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, p->modulator, (unsigned long long)p->std, p->capabilities); } static void v4l_print_audio(const void *arg, bool write_only) { const struct v4l2_audio *p = arg; if (write_only) pr_cont("index=%u, mode=0x%x\n", p->index, p->mode); else pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n", p->index, (int)sizeof(p->name), p->name, p->capability, p->mode); } static void v4l_print_audioout(const void *arg, bool write_only) { const struct v4l2_audioout *p = arg; if (write_only) pr_cont("index=%u\n", p->index); else pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n", p->index, (int)sizeof(p->name), p->name, p->capability, p->mode); } static void v4l_print_fmtdesc(const void *arg, bool write_only) { const struct v4l2_fmtdesc *p = arg; pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%p4cc, mbus_code=0x%04x, description='%.*s'\n", p->index, prt_names(p->type, v4l2_type_names), p->flags, &p->pixelformat, p->mbus_code, (int)sizeof(p->description), p->description); } static void v4l_print_format(const void *arg, bool write_only) { const struct v4l2_format *p = arg; const struct v4l2_pix_format *pix; const struct v4l2_pix_format_mplane *mp; const struct v4l2_vbi_format *vbi; const struct v4l2_sliced_vbi_format *sliced; const struct v4l2_window *win; const struct v4l2_meta_format *meta; u32 pixelformat; u32 planes; unsigned i; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: pix = &p->fmt.pix; pr_cont(", width=%u, height=%u, pixelformat=%p4cc, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", pix->width, pix->height, &pix->pixelformat, prt_names(pix->field, v4l2_field_names), pix->bytesperline, pix->sizeimage, pix->colorspace, pix->flags, pix->ycbcr_enc, pix->quantization, pix->xfer_func); break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: mp = &p->fmt.pix_mp; pixelformat = mp->pixelformat; pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n", mp->width, mp->height, &pixelformat, prt_names(mp->field, v4l2_field_names), mp->colorspace, mp->num_planes, mp->flags, mp->ycbcr_enc, mp->quantization, mp->xfer_func); planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); for (i = 0; i < planes; i++) printk(KERN_DEBUG "plane %u: 
bytesperline=%u sizeimage=%u\n", i, mp->plane_fmt[i].bytesperline, mp->plane_fmt[i].sizeimage); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: win = &p->fmt.win; pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, global_alpha=0x%02x\n", win->w.width, win->w.height, win->w.left, win->w.top, prt_names(win->field, v4l2_field_names), win->chromakey, win->global_alpha); break; case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: vbi = &p->fmt.vbi; pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%p4cc, start=%u,%u, count=%u,%u\n", vbi->sampling_rate, vbi->offset, vbi->samples_per_line, &vbi->sample_format, vbi->start[0], vbi->start[1], vbi->count[0], vbi->count[1]); break; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: sliced = &p->fmt.sliced; pr_cont(", service_set=0x%08x, io_size=%d\n", sliced->service_set, sliced->io_size); for (i = 0; i < 24; i++) printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i, sliced->service_lines[0][i], sliced->service_lines[1][i]); break; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: pixelformat = p->fmt.sdr.pixelformat; pr_cont(", pixelformat=%p4cc\n", &pixelformat); break; case V4L2_BUF_TYPE_META_CAPTURE: case V4L2_BUF_TYPE_META_OUTPUT: meta = &p->fmt.meta; pixelformat = meta->dataformat; pr_cont(", dataformat=%p4cc, buffersize=%u, width=%u, height=%u, bytesperline=%u\n", &pixelformat, meta->buffersize, meta->width, meta->height, meta->bytesperline); break; } } static void v4l_print_framebuffer(const void *arg, bool write_only) { const struct v4l2_framebuffer *p = arg; pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%p4cc, bytesperline=%u, sizeimage=%u, colorspace=%d\n", p->capability, p->flags, p->base, p->fmt.width, p->fmt.height, &p->fmt.pixelformat, p->fmt.bytesperline, p->fmt.sizeimage, p->fmt.colorspace); } static void v4l_print_buftype(const void *arg, bool write_only) { pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names)); } static void v4l_print_modulator(const void *arg, bool write_only) { const struct v4l2_modulator *p = arg; if (write_only) pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans); else pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n", p->index, (int)sizeof(p->name), p->name, p->capability, p->rangelow, p->rangehigh, p->txsubchans); } static void v4l_print_tuner(const void *arg, bool write_only) { const struct v4l2_tuner *p = arg; if (write_only) pr_cont("index=%u, audmode=%u\n", p->index, p->audmode); else pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n", p->index, (int)sizeof(p->name), p->name, p->type, p->capability, p->rangelow, p->rangehigh, p->signal, p->afc, p->rxsubchans, p->audmode); } static void v4l_print_frequency(const void *arg, bool write_only) { const struct v4l2_frequency *p = arg; pr_cont("tuner=%u, type=%u, frequency=%u\n", p->tuner, p->type, p->frequency); } static void v4l_print_standard(const void *arg, bool write_only) { const struct v4l2_standard *p = arg; pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n", p->index, (unsigned long long)p->id, (int)sizeof(p->name), p->name, p->frameperiod.numerator, p->frameperiod.denominator, p->framelines); } static void v4l_print_std(const void *arg, bool write_only) { pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg); } static void 
v4l_print_hw_freq_seek(const void *arg, bool write_only) { const struct v4l2_hw_freq_seek *p = arg; pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n", p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, p->rangelow, p->rangehigh); } static void v4l_print_requestbuffers(const void *arg, bool write_only) { const struct v4l2_requestbuffers *p = arg; pr_cont("count=%d, type=%s, memory=%s\n", p->count, prt_names(p->type, v4l2_type_names), prt_names(p->memory, v4l2_memory_names)); } static void v4l_print_buffer(const void *arg, bool write_only) { const struct v4l2_buffer *p = arg; const struct v4l2_timecode *tc = &p->timecode; const struct v4l2_plane *plane; int i; pr_cont("%02d:%02d:%02d.%06ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s", (int)p->timestamp.tv_sec / 3600, ((int)p->timestamp.tv_sec / 60) % 60, ((int)p->timestamp.tv_sec % 60), (long)p->timestamp.tv_usec, p->index, prt_names(p->type, v4l2_type_names), p->request_fd, p->flags, prt_names(p->field, v4l2_field_names), p->sequence, prt_names(p->memory, v4l2_memory_names)); if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) { pr_cont("\n"); for (i = 0; i < p->length; ++i) { plane = &p->m.planes[i]; printk(KERN_DEBUG "plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n", i, plane->bytesused, plane->data_offset, plane->m.userptr, plane->length); } } else { pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n", p->bytesused, p->m.userptr, p->length); } printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n", tc->hours, tc->minutes, tc->seconds, tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits); } static void v4l_print_exportbuffer(const void *arg, bool write_only) { const struct v4l2_exportbuffer *p = arg; pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n", p->fd, prt_names(p->type, v4l2_type_names), p->index, p->plane, p->flags); } static void v4l_print_create_buffers(const void *arg, bool write_only) { const struct v4l2_create_buffers *p = arg; pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u", p->index, p->count, prt_names(p->memory, v4l2_memory_names), p->capabilities, p->max_num_buffers); v4l_print_format(&p->format, write_only); } static void v4l_print_remove_buffers(const void *arg, bool write_only) { const struct v4l2_remove_buffers *p = arg; pr_cont("type=%s, index=%u, count=%u\n", prt_names(p->type, v4l2_type_names), p->index, p->count); } static void v4l_print_streamparm(const void *arg, bool write_only) { const struct v4l2_streamparm *p = arg; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { const struct v4l2_captureparm *c = &p->parm.capture; pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n", c->capability, c->capturemode, c->timeperframe.numerator, c->timeperframe.denominator, c->extendedmode, c->readbuffers); } else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT || p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { const struct v4l2_outputparm *c = &p->parm.output; pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n", c->capability, c->outputmode, c->timeperframe.numerator, c->timeperframe.denominator, c->extendedmode, c->writebuffers); } else { pr_cont("\n"); } } static void v4l_print_queryctrl(const void *arg, bool 
write_only) { const struct v4l2_queryctrl *p = arg; pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n", p->id, p->type, (int)sizeof(p->name), p->name, p->minimum, p->maximum, p->step, p->default_value, p->flags); } static void v4l_print_query_ext_ctrl(const void *arg, bool write_only) { const struct v4l2_query_ext_ctrl *p = arg; pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n", p->id, p->type, (int)sizeof(p->name), p->name, p->minimum, p->maximum, p->step, p->default_value, p->flags, p->elem_size, p->elems, p->nr_of_dims, p->dims[0], p->dims[1], p->dims[2], p->dims[3]); } static void v4l_print_querymenu(const void *arg, bool write_only) { const struct v4l2_querymenu *p = arg; pr_cont("id=0x%x, index=%d\n", p->id, p->index); } static void v4l_print_control(const void *arg, bool write_only) { const struct v4l2_control *p = arg; const char *name = v4l2_ctrl_get_name(p->id); if (name) pr_cont("name=%s, ", name); pr_cont("id=0x%x, value=%d\n", p->id, p->value); } static void v4l_print_ext_controls(const void *arg, bool write_only) { const struct v4l2_ext_controls *p = arg; int i; pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d", p->which, p->count, p->error_idx, p->request_fd); for (i = 0; i < p->count; i++) { unsigned int id = p->controls[i].id; const char *name = v4l2_ctrl_get_name(id); if (name) pr_cont(", name=%s", name); if (!p->controls[i].size) pr_cont(", id/val=0x%x/0x%x", id, p->controls[i].value); else pr_cont(", id/size=0x%x/%u", id, p->controls[i].size); } pr_cont("\n"); } static void v4l_print_cropcap(const void *arg, bool write_only) { const struct v4l2_cropcap *p = arg; pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n", prt_names(p->type, v4l2_type_names), p->bounds.width, p->bounds.height, p->bounds.left, p->bounds.top, p->defrect.width, p->defrect.height, p->defrect.left, p->defrect.top, p->pixelaspect.numerator, p->pixelaspect.denominator); } static void v4l_print_crop(const void *arg, bool write_only) { const struct v4l2_crop *p = arg; pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n", prt_names(p->type, v4l2_type_names), p->c.width, p->c.height, p->c.left, p->c.top); } static void v4l_print_selection(const void *arg, bool write_only) { const struct v4l2_selection *p = arg; pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n", prt_names(p->type, v4l2_type_names), p->target, p->flags, p->r.width, p->r.height, p->r.left, p->r.top); } static void v4l_print_jpegcompression(const void *arg, bool write_only) { const struct v4l2_jpegcompression *p = arg; pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n", p->quality, p->APPn, p->APP_len, p->COM_len, p->jpeg_markers); } static void v4l_print_enc_idx(const void *arg, bool write_only) { const struct v4l2_enc_idx *p = arg; pr_cont("entries=%d, entries_cap=%d\n", p->entries, p->entries_cap); } static void v4l_print_encoder_cmd(const void *arg, bool write_only) { const struct v4l2_encoder_cmd *p = arg; pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags); } static void v4l_print_decoder_cmd(const void *arg, bool write_only) { const struct v4l2_decoder_cmd *p = arg; pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags); if (p->cmd == V4L2_DEC_CMD_START) pr_info("speed=%d, format=%u\n", p->start.speed, p->start.format); else if (p->cmd == V4L2_DEC_CMD_STOP) pr_info("pts=%llu\n", p->stop.pts); } static void 
v4l_print_dbg_chip_info(const void *arg, bool write_only) { const struct v4l2_dbg_chip_info *p = arg; pr_cont("type=%u, ", p->match.type); if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER) pr_cont("name=%.*s, ", (int)sizeof(p->match.name), p->match.name); else pr_cont("addr=%u, ", p->match.addr); pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name); } static void v4l_print_dbg_register(const void *arg, bool write_only) { const struct v4l2_dbg_register *p = arg; pr_cont("type=%u, ", p->match.type); if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER) pr_cont("name=%.*s, ", (int)sizeof(p->match.name), p->match.name); else pr_cont("addr=%u, ", p->match.addr); pr_cont("reg=0x%llx, val=0x%llx\n", p->reg, p->val); } static void v4l_print_dv_timings(const void *arg, bool write_only) { const struct v4l2_dv_timings *p = arg; switch (p->type) { case V4L2_DV_BT_656_1120: pr_cont("type=bt-656/1120, interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n", p->bt.interlaced, p->bt.pixelclock, p->bt.width, p->bt.height, p->bt.polarities, p->bt.hfrontporch, p->bt.hsync, p->bt.hbackporch, p->bt.vfrontporch, p->bt.vsync, p->bt.vbackporch, p->bt.il_vfrontporch, p->bt.il_vsync, p->bt.il_vbackporch, p->bt.standards, p->bt.flags); break; default: pr_cont("type=%d\n", p->type); break; } } static void v4l_print_enum_dv_timings(const void *arg, bool write_only) { const struct v4l2_enum_dv_timings *p = arg; pr_cont("index=%u, ", p->index); v4l_print_dv_timings(&p->timings, write_only); } static void v4l_print_dv_timings_cap(const void *arg, bool write_only) { const struct v4l2_dv_timings_cap *p = arg; switch (p->type) { case V4L2_DV_BT_656_1120: pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n", p->bt.min_width, p->bt.max_width, p->bt.min_height, p->bt.max_height, p->bt.min_pixelclock, p->bt.max_pixelclock, p->bt.standards, p->bt.capabilities); break; default: pr_cont("type=%u\n", p->type); break; } } static void v4l_print_frmsizeenum(const void *arg, bool write_only) { const struct v4l2_frmsizeenum *p = arg; pr_cont("index=%u, pixelformat=%p4cc, type=%u", p->index, &p->pixel_format, p->type); switch (p->type) { case V4L2_FRMSIZE_TYPE_DISCRETE: pr_cont(", wxh=%ux%u\n", p->discrete.width, p->discrete.height); break; case V4L2_FRMSIZE_TYPE_STEPWISE: pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n", p->stepwise.min_width, p->stepwise.min_height, p->stepwise.max_width, p->stepwise.max_height, p->stepwise.step_width, p->stepwise.step_height); break; case V4L2_FRMSIZE_TYPE_CONTINUOUS: default: pr_cont("\n"); break; } } static void v4l_print_frmivalenum(const void *arg, bool write_only) { const struct v4l2_frmivalenum *p = arg; pr_cont("index=%u, pixelformat=%p4cc, wxh=%ux%u, type=%u", p->index, &p->pixel_format, p->width, p->height, p->type); switch (p->type) { case V4L2_FRMIVAL_TYPE_DISCRETE: pr_cont(", fps=%d/%d\n", p->discrete.numerator, p->discrete.denominator); break; case V4L2_FRMIVAL_TYPE_STEPWISE: pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n", p->stepwise.min.numerator, p->stepwise.min.denominator, p->stepwise.max.numerator, p->stepwise.max.denominator, p->stepwise.step.numerator, p->stepwise.step.denominator); break; case V4L2_FRMIVAL_TYPE_CONTINUOUS: default: pr_cont("\n"); break; } } static void v4l_print_event(const void *arg, bool write_only) { const struct v4l2_event *p = arg; 
const struct v4l2_event_ctrl *c; pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n", p->type, p->pending, p->sequence, p->id, p->timestamp.tv_sec, p->timestamp.tv_nsec); switch (p->type) { case V4L2_EVENT_VSYNC: printk(KERN_DEBUG "field=%s\n", prt_names(p->u.vsync.field, v4l2_field_names)); break; case V4L2_EVENT_CTRL: c = &p->u.ctrl; printk(KERN_DEBUG "changes=0x%x, type=%u, ", c->changes, c->type); if (c->type == V4L2_CTRL_TYPE_INTEGER64) pr_cont("value64=%lld, ", c->value64); else pr_cont("value=%d, ", c->value); pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n", c->flags, c->minimum, c->maximum, c->step, c->default_value); break; case V4L2_EVENT_FRAME_SYNC: pr_cont("frame_sequence=%u\n", p->u.frame_sync.frame_sequence); break; } } static void v4l_print_event_subscription(const void *arg, bool write_only) { const struct v4l2_event_subscription *p = arg; pr_cont("type=0x%x, id=0x%x, flags=0x%x\n", p->type, p->id, p->flags); } static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only) { const struct v4l2_sliced_vbi_cap *p = arg; int i; pr_cont("type=%s, service_set=0x%08x\n", prt_names(p->type, v4l2_type_names), p->service_set); for (i = 0; i < 24; i++) printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i, p->service_lines[0][i], p->service_lines[1][i]); } static void v4l_print_freq_band(const void *arg, bool write_only) { const struct v4l2_frequency_band *p = arg; pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n", p->tuner, p->type, p->index, p->capability, p->rangelow, p->rangehigh, p->modulation); } static void v4l_print_edid(const void *arg, bool write_only) { const struct v4l2_edid *p = arg; pr_cont("pad=%u, start_block=%u, blocks=%u\n", p->pad, p->start_block, p->blocks); } static void v4l_print_u32(const void *arg, bool write_only) { pr_cont("value=%u\n", *(const u32 *)arg); } static void v4l_print_newline(const void *arg, bool write_only) { pr_cont("\n"); } static void v4l_print_default(const void *arg, bool write_only) { pr_cont("driver-specific ioctl\n"); } static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl) { __u32 i; /* zero the reserved fields */ c->reserved[0] = 0; for (i = 0; i < c->count; i++) c->controls[i].reserved2[0] = 0; switch (c->which) { case V4L2_CID_PRIVATE_BASE: /* * V4L2_CID_PRIVATE_BASE cannot be used as control class * when using extended controls. * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL * is it allowed for backwards compatibility. */ if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL) return false; break; case V4L2_CTRL_WHICH_DEF_VAL: /* Default value cannot be changed */ if (ioctl == VIDIOC_S_EXT_CTRLS || ioctl == VIDIOC_TRY_EXT_CTRLS) { c->error_idx = c->count; return false; } return true; case V4L2_CTRL_WHICH_CUR_VAL: return true; case V4L2_CTRL_WHICH_REQUEST_VAL: c->error_idx = c->count; return false; } /* Check that all controls are from the same control class. */ for (i = 0; i < c->count; i++) { if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) { c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? 
i : c->count; return false; } } return true; } static int check_fmt(struct file *file, enum v4l2_buf_type type) { const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE; const u32 meta_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_META_OUTPUT; struct video_device *vfd = video_devdata(file); const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO && (vfd->device_caps & vid_caps); bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI; bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO && (vfd->device_caps & meta_caps); bool is_rx = vfd->vfl_dir != VFL_DIR_TX; bool is_tx = vfd->vfl_dir != VFL_DIR_RX; if (ops == NULL) return -EINVAL; switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if ((is_vid || is_tch) && is_rx && (ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane)) return 0; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane) return 0; break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: if (is_vid && is_rx && ops->vidioc_g_fmt_vid_overlay) return 0; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (is_vid && is_tx && (ops->vidioc_g_fmt_vid_out || ops->vidioc_g_fmt_vid_out_mplane)) return 0; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_mplane) return 0; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_overlay) return 0; break; case V4L2_BUF_TYPE_VBI_CAPTURE: if (is_vbi && is_rx && ops->vidioc_g_fmt_vbi_cap) return 0; break; case V4L2_BUF_TYPE_VBI_OUTPUT: if (is_vbi && is_tx && ops->vidioc_g_fmt_vbi_out) return 0; break; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: if (is_vbi && is_rx && ops->vidioc_g_fmt_sliced_vbi_cap) return 0; break; case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out) return 0; break; case V4L2_BUF_TYPE_SDR_CAPTURE: if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap) return 0; break; case V4L2_BUF_TYPE_SDR_OUTPUT: if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out) return 0; break; case V4L2_BUF_TYPE_META_CAPTURE: if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap) return 0; break; case V4L2_BUF_TYPE_META_OUTPUT: if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out) return 0; break; default: break; } return -EINVAL; } static void v4l_sanitize_colorspace(u32 pixelformat, u32 *colorspace, u32 *encoding, u32 *quantization, u32 *xfer_func) { bool is_hsv = pixelformat == V4L2_PIX_FMT_HSV24 || pixelformat == V4L2_PIX_FMT_HSV32; if (!v4l2_is_colorspace_valid(*colorspace)) { *colorspace = V4L2_COLORSPACE_DEFAULT; *encoding = V4L2_YCBCR_ENC_DEFAULT; *quantization = V4L2_QUANTIZATION_DEFAULT; *xfer_func = V4L2_XFER_FUNC_DEFAULT; } if ((!is_hsv && !v4l2_is_ycbcr_enc_valid(*encoding)) || (is_hsv && !v4l2_is_hsv_enc_valid(*encoding))) *encoding = V4L2_YCBCR_ENC_DEFAULT; if (!v4l2_is_quant_valid(*quantization)) *quantization = V4L2_QUANTIZATION_DEFAULT; if (!v4l2_is_xfer_func_valid(*xfer_func)) *xfer_func = V4L2_XFER_FUNC_DEFAULT; } static void v4l_sanitize_format(struct v4l2_format *fmt) { unsigned int offset; /* Make sure num_planes is not bogus */ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes, VIDEO_MAX_PLANES); /* * The v4l2_pix_format 
structure has been extended with fields that were * not previously required to be set to zero by applications. The priv * field, when set to a magic value, indicates that the extended fields * are valid. Otherwise they will contain undefined values. To simplify * the API towards drivers zero the extended fields and set the priv * field to the magic value when the extended pixel format structure * isn't used by applications. */ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { if (fmt->fmt.pix.priv != V4L2_PIX_FMT_PRIV_MAGIC) { fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; offset = offsetof(struct v4l2_pix_format, priv) + sizeof(fmt->fmt.pix.priv); memset(((void *)&fmt->fmt.pix) + offset, 0, sizeof(fmt->fmt.pix) - offset); } } /* Replace invalid colorspace values with defaults. */ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { v4l_sanitize_colorspace(fmt->fmt.pix.pixelformat, &fmt->fmt.pix.colorspace, &fmt->fmt.pix.ycbcr_enc, &fmt->fmt.pix.quantization, &fmt->fmt.pix.xfer_func); } else if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { u32 ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc; u32 quantization = fmt->fmt.pix_mp.quantization; u32 xfer_func = fmt->fmt.pix_mp.xfer_func; v4l_sanitize_colorspace(fmt->fmt.pix_mp.pixelformat, &fmt->fmt.pix_mp.colorspace, &ycbcr_enc, &quantization, &xfer_func); fmt->fmt.pix_mp.ycbcr_enc = ycbcr_enc; fmt->fmt.pix_mp.quantization = quantization; fmt->fmt.pix_mp.xfer_func = xfer_func; } } static int v4l_querycap(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_capability *cap = (struct v4l2_capability *)arg; struct video_device *vfd = video_devdata(file); int ret; cap->version = LINUX_VERSION_CODE; cap->device_caps = vfd->device_caps; cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS; media_set_bus_info(cap->bus_info, sizeof(cap->bus_info), vfd->dev_parent); ret = ops->vidioc_querycap(file, fh, cap); /* * Drivers must not change device_caps, so check for this and * warn if this happened. */ WARN_ON(cap->device_caps != vfd->device_caps); /* * Check that capabilities is a superset of * vfd->device_caps | V4L2_CAP_DEVICE_CAPS */ WARN_ON((cap->capabilities & (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)) != (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)); cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT; cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT; return ret; } static int v4l_g_input(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); if (vfd->device_caps & V4L2_CAP_IO_MC) { *(int *)arg = 0; return 0; } return ops->vidioc_g_input(file, fh, arg); } static int v4l_g_output(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); if (vfd->device_caps & V4L2_CAP_IO_MC) { *(int *)arg = 0; return 0; } return ops->vidioc_g_output(file, fh, arg); } static int v4l_s_input(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); int ret; ret = v4l_enable_media_source(vfd); if (ret) return ret; if (vfd->device_caps & V4L2_CAP_IO_MC) return *(int *)arg ? 
-EINVAL : 0; return ops->vidioc_s_input(file, fh, *(unsigned int *)arg); } static int v4l_s_output(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); if (vfd->device_caps & V4L2_CAP_IO_MC) return *(int *)arg ? -EINVAL : 0; return ops->vidioc_s_output(file, fh, *(unsigned int *)arg); } static int v4l_g_priority(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd; u32 *p = arg; vfd = video_devdata(file); *p = v4l2_prio_max(vfd->prio); return 0; } static int v4l_s_priority(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd; struct v4l2_fh *vfh; u32 *p = arg; vfd = video_devdata(file); if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) return -ENOTTY; vfh = file->private_data; return v4l2_prio_change(vfd->prio, &vfh->prio, *p); } static int v4l_enuminput(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_input *p = arg; /* * We set the flags for CAP_DV_TIMINGS & * CAP_STD here based on ioctl handler provided by the * driver. If the driver doesn't support these * for a specific input, it must override these flags. */ if (is_valid_ioctl(vfd, VIDIOC_S_STD)) p->capabilities |= V4L2_IN_CAP_STD; if (vfd->device_caps & V4L2_CAP_IO_MC) { if (p->index) return -EINVAL; strscpy(p->name, vfd->name, sizeof(p->name)); p->type = V4L2_INPUT_TYPE_CAMERA; return 0; } return ops->vidioc_enum_input(file, fh, p); } static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_output *p = arg; /* * We set the flags for CAP_DV_TIMINGS & * CAP_STD here based on ioctl handler provided by the * driver. If the driver doesn't support these * for a specific output, it must override these flags. */ if (is_valid_ioctl(vfd, VIDIOC_S_STD)) p->capabilities |= V4L2_OUT_CAP_STD; if (vfd->device_caps & V4L2_CAP_IO_MC) { if (p->index) return -EINVAL; strscpy(p->name, vfd->name, sizeof(p->name)); p->type = V4L2_OUTPUT_TYPE_ANALOG; return 0; } return ops->vidioc_enum_output(file, fh, p); } static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) { const unsigned sz = sizeof(fmt->description); const char *descr = NULL; u32 flags = 0; /* * We depart from the normal coding style here since the descriptions * should be aligned so it is easy to see which descriptions will be * longer than 31 characters (the max length for a description). * And frankly, this is easier to read anyway. * * Note that gcc will use O(log N) comparisons to find the right case. 
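 * (The description field of struct v4l2_fmtdesc is a 32-byte array, so any
 * string longer than 31 characters would be truncated and trip the WARN_ON
 * around the strscpy() at the end of this function.)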
*/ switch (fmt->pixelformat) { /* Max description length mask: descr = "0123456789012345678901234567890" */ case V4L2_PIX_FMT_RGB332: descr = "8-bit RGB 3-3-2"; break; case V4L2_PIX_FMT_RGB444: descr = "16-bit A/XRGB 4-4-4-4"; break; case V4L2_PIX_FMT_ARGB444: descr = "16-bit ARGB 4-4-4-4"; break; case V4L2_PIX_FMT_XRGB444: descr = "16-bit XRGB 4-4-4-4"; break; case V4L2_PIX_FMT_RGBA444: descr = "16-bit RGBA 4-4-4-4"; break; case V4L2_PIX_FMT_RGBX444: descr = "16-bit RGBX 4-4-4-4"; break; case V4L2_PIX_FMT_ABGR444: descr = "16-bit ABGR 4-4-4-4"; break; case V4L2_PIX_FMT_XBGR444: descr = "16-bit XBGR 4-4-4-4"; break; case V4L2_PIX_FMT_BGRA444: descr = "16-bit BGRA 4-4-4-4"; break; case V4L2_PIX_FMT_BGRX444: descr = "16-bit BGRX 4-4-4-4"; break; case V4L2_PIX_FMT_RGB555: descr = "16-bit A/XRGB 1-5-5-5"; break; case V4L2_PIX_FMT_ARGB555: descr = "16-bit ARGB 1-5-5-5"; break; case V4L2_PIX_FMT_XRGB555: descr = "16-bit XRGB 1-5-5-5"; break; case V4L2_PIX_FMT_ABGR555: descr = "16-bit ABGR 1-5-5-5"; break; case V4L2_PIX_FMT_XBGR555: descr = "16-bit XBGR 1-5-5-5"; break; case V4L2_PIX_FMT_RGBA555: descr = "16-bit RGBA 5-5-5-1"; break; case V4L2_PIX_FMT_RGBX555: descr = "16-bit RGBX 5-5-5-1"; break; case V4L2_PIX_FMT_BGRA555: descr = "16-bit BGRA 5-5-5-1"; break; case V4L2_PIX_FMT_BGRX555: descr = "16-bit BGRX 5-5-5-1"; break; case V4L2_PIX_FMT_RGB565: descr = "16-bit RGB 5-6-5"; break; case V4L2_PIX_FMT_RGB555X: descr = "16-bit A/XRGB 1-5-5-5 BE"; break; case V4L2_PIX_FMT_ARGB555X: descr = "16-bit ARGB 1-5-5-5 BE"; break; case V4L2_PIX_FMT_XRGB555X: descr = "16-bit XRGB 1-5-5-5 BE"; break; case V4L2_PIX_FMT_RGB565X: descr = "16-bit RGB 5-6-5 BE"; break; case V4L2_PIX_FMT_BGR666: descr = "18-bit BGRX 6-6-6-14"; break; case V4L2_PIX_FMT_BGR24: descr = "24-bit BGR 8-8-8"; break; case V4L2_PIX_FMT_RGB24: descr = "24-bit RGB 8-8-8"; break; case V4L2_PIX_FMT_BGR32: descr = "32-bit BGRA/X 8-8-8-8"; break; case V4L2_PIX_FMT_ABGR32: descr = "32-bit BGRA 8-8-8-8"; break; case V4L2_PIX_FMT_XBGR32: descr = "32-bit BGRX 8-8-8-8"; break; case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break; case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break; case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break; case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break; case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break; case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break; case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break; case V4L2_PIX_FMT_RGBX1010102: descr = "32-bit RGBX 10-10-10-2"; break; case V4L2_PIX_FMT_RGBA1010102: descr = "32-bit RGBA 10-10-10-2"; break; case V4L2_PIX_FMT_ARGB2101010: descr = "32-bit ARGB 2-10-10-10"; break; case V4L2_PIX_FMT_BGR48: descr = "48-bit BGR 16-16-16"; break; case V4L2_PIX_FMT_RGB48: descr = "48-bit RGB 16-16-16"; break; case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break; case V4L2_PIX_FMT_ABGR64_12: descr = "12-bit Depth BGRA"; break; case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break; case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break; case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break; case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break; case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break; case V4L2_PIX_FMT_Y012: descr = "12-bit Greyscale (bits 15-4)"; break; case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break; case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break; case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break; case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit 
Greyscale (Packed)"; break; case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break; case V4L2_PIX_FMT_IPU3_Y10: descr = "10-bit greyscale (IPU3 Packed)"; break; case V4L2_PIX_FMT_Y12P: descr = "12-bit Greyscale (MIPI Packed)"; break; case V4L2_PIX_FMT_Y14P: descr = "14-bit Greyscale (MIPI Packed)"; break; case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break; case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break; case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break; case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break; case V4L2_PIX_FMT_CNF4: descr = "4-bit Depth Confidence (Packed)"; break; case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break; case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break; case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break; case V4L2_PIX_FMT_YVU420: descr = "Planar YVU 4:2:0"; break; case V4L2_PIX_FMT_YUYV: descr = "YUYV 4:2:2"; break; case V4L2_PIX_FMT_YYUV: descr = "YYUV 4:2:2"; break; case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break; case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break; case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break; case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break; case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break; case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break; case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break; case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break; case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break; case V4L2_PIX_FMT_YUV24: descr = "24-bit YUV 4:4:4 8-8-8"; break; case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break; case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break; case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break; case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break; case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break; case V4L2_PIX_FMT_YUVA32: descr = "32-bit YUVA 8-8-8-8"; break; case V4L2_PIX_FMT_YUVX32: descr = "32-bit YUVX 8-8-8-8"; break; case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break; case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break; case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break; case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break; case V4L2_PIX_FMT_YUV48_12: descr = "12-bit YUV 4:4:4 Packed"; break; case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break; case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break; case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break; case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break; case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break; case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break; case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break; case V4L2_PIX_FMT_P012: descr = "12-bit Y/UV 4:2:0"; break; case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break; case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break; case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break; case V4L2_PIX_FMT_NV15_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break; case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break; case V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_NV16M: descr = "Y/UV 4:2:2 (N-C)"; break; case V4L2_PIX_FMT_NV61M: descr = "Y/VU 4:2:2 (N-C)"; break; case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break; case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 
4:2:0 (16x16 MB, N-C)"; break; case V4L2_PIX_FMT_P012M: descr = "12-bit Y/UV 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break; case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break; case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break; case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break; case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break; case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break; case V4L2_PIX_FMT_SRGGB8: descr = "8-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SBGGR10: descr = "10-bit Bayer BGBG/GRGR"; break; case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break; case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break; case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break; case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break; case V4L2_PIX_FMT_SRGGB10P: descr = "10-bit Bayer RGRG/GBGB Packed"; break; case V4L2_PIX_FMT_IPU3_SBGGR10: descr = "10-bit bayer BGGR IPU3 Packed"; break; case V4L2_PIX_FMT_IPU3_SGBRG10: descr = "10-bit bayer GBRG IPU3 Packed"; break; case V4L2_PIX_FMT_IPU3_SGRBG10: descr = "10-bit bayer GRBG IPU3 Packed"; break; case V4L2_PIX_FMT_IPU3_SRGGB10: descr = "10-bit bayer RGGB IPU3 Packed"; break; case V4L2_PIX_FMT_SBGGR10ALAW8: descr = "8-bit Bayer BGBG/GRGR (A-law)"; break; case V4L2_PIX_FMT_SGBRG10ALAW8: descr = "8-bit Bayer GBGB/RGRG (A-law)"; break; case V4L2_PIX_FMT_SGRBG10ALAW8: descr = "8-bit Bayer GRGR/BGBG (A-law)"; break; case V4L2_PIX_FMT_SRGGB10ALAW8: descr = "8-bit Bayer RGRG/GBGB (A-law)"; break; case V4L2_PIX_FMT_SBGGR10DPCM8: descr = "8-bit Bayer BGBG/GRGR (DPCM)"; break; case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break; case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break; case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break; case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break; case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break; case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break; case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break; case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break; case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break; case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break; case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break; case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break; case V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break; case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break; case V4L2_PIX_FMT_SGBRG16: descr 
= "16-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break; case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break; case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break; case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break; case V4L2_PIX_FMT_SPCA508: descr = "GSPCA SPCA508"; break; case V4L2_PIX_FMT_STV0680: descr = "GSPCA STV0680"; break; case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break; case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break; case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break; case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break; case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break; case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break; case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break; case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break; case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break; case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break; case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break; case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break; case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break; case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break; case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit Signed Deltas"; break; case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit Signed Deltas"; break; case V4L2_TCH_FMT_TU16: descr = "16-bit Unsigned Touch Data"; break; case V4L2_TCH_FMT_TU08: descr = "8-bit Unsigned Touch Data"; break; case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break; case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break; case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break; case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break; case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break; case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break; case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break; case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break; case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break; case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break; case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break; case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break; case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break; case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break; case V4L2_META_FMT_RPI_BE_CFG: descr = "RPi PiSP BE Config format"; break; case V4L2_META_FMT_GENERIC_8: descr = "8-bit Generic Metadata"; break; case V4L2_META_FMT_GENERIC_CSI2_10: descr = "8-bit Generic Meta, 10b CSI-2"; break; case V4L2_META_FMT_GENERIC_CSI2_12: descr = "8-bit Generic Meta, 12b CSI-2"; break; case V4L2_META_FMT_GENERIC_CSI2_14: descr = "8-bit Generic Meta, 14b CSI-2"; break; case V4L2_META_FMT_GENERIC_CSI2_16: descr = "8-bit Generic Meta, 16b CSI-2"; break; case V4L2_META_FMT_GENERIC_CSI2_20: descr = "8-bit Generic Meta, 20b CSI-2"; break; case V4L2_META_FMT_GENERIC_CSI2_24: descr = "8-bit Generic Meta, 24b CSI-2"; break; default: /* Compressed formats */ flags = V4L2_FMT_FLAG_COMPRESSED; switch (fmt->pixelformat) { /* Max description length mask: descr = "0123456789012345678901234567890" */ case V4L2_PIX_FMT_MJPEG: descr = "Motion-JPEG"; break; case V4L2_PIX_FMT_JPEG: descr = "JFIF JPEG"; break; case V4L2_PIX_FMT_DV: 
descr = "1394"; break; case V4L2_PIX_FMT_MPEG: descr = "MPEG-1/2/4"; break; case V4L2_PIX_FMT_H264: descr = "H.264"; break; case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break; case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break; case V4L2_PIX_FMT_H264_SLICE: descr = "H.264 Parsed Slice Data"; break; case V4L2_PIX_FMT_H263: descr = "H.263"; break; case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break; case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break; case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break; case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 Part 2 ES"; break; case V4L2_PIX_FMT_XVID: descr = "Xvid"; break; case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break; case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break; case V4L2_PIX_FMT_VP8: descr = "VP8"; break; case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break; case V4L2_PIX_FMT_VP9: descr = "VP9"; break; case V4L2_PIX_FMT_VP9_FRAME: descr = "VP9 Frame"; break; case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */ case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break; case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */ case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */ case V4L2_PIX_FMT_SPK: descr = "Sorenson Spark"; break; case V4L2_PIX_FMT_RV30: descr = "RealVideo 8"; break; case V4L2_PIX_FMT_RV40: descr = "RealVideo 9 & 10"; break; case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break; case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break; case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break; case V4L2_PIX_FMT_PWC1: descr = "Raw Philips Webcam Type (Old)"; break; case V4L2_PIX_FMT_PWC2: descr = "Raw Philips Webcam Type (New)"; break; case V4L2_PIX_FMT_ET61X251: descr = "GSPCA ET61X251"; break; case V4L2_PIX_FMT_SPCA561: descr = "GSPCA SPCA561"; break; case V4L2_PIX_FMT_PAC207: descr = "GSPCA PAC207"; break; case V4L2_PIX_FMT_MR97310A: descr = "GSPCA MR97310A"; break; case V4L2_PIX_FMT_JL2005BCD: descr = "GSPCA JL2005BCD"; break; case V4L2_PIX_FMT_SN9C2028: descr = "GSPCA SN9C2028"; break; case V4L2_PIX_FMT_SQ905C: descr = "GSPCA SQ905C"; break; case V4L2_PIX_FMT_PJPG: descr = "GSPCA PJPG"; break; case V4L2_PIX_FMT_OV511: descr = "GSPCA OV511"; break; case V4L2_PIX_FMT_OV518: descr = "GSPCA OV518"; break; case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break; case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break; case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break; case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break; case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break; case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break; case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break; case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break; case V4L2_PIX_FMT_MT2110T: descr = "Mediatek 10bit Tile Mode"; break; case V4L2_PIX_FMT_MT2110R: descr = "Mediatek 10bit Raster Mode"; break; case V4L2_PIX_FMT_HEXTILE: descr = "Hextile Compressed Format"; break; case V4L2_PIX_FMT_PISP_COMP1_RGGB: descr = "PiSP 8b RGRG/GBGB mode1 compr"; break; case V4L2_PIX_FMT_PISP_COMP1_GRBG: descr = "PiSP 8b GRGR/BGBG mode1 compr"; break; case V4L2_PIX_FMT_PISP_COMP1_GBRG: descr = "PiSP 8b GBGB/RGRG mode1 compr"; break; case V4L2_PIX_FMT_PISP_COMP1_BGGR: descr = "PiSP 8b BGBG/GRGR mode1 compr"; break; case V4L2_PIX_FMT_PISP_COMP1_MONO: descr = "PiSP 8b monochrome mode1 compr"; break; case V4L2_PIX_FMT_PISP_COMP2_RGGB: descr = "PiSP 
8b RGRG/GBGB mode2 compr"; break; case V4L2_PIX_FMT_PISP_COMP2_GRBG: descr = "PiSP 8b GRGR/BGBG mode2 compr"; break; case V4L2_PIX_FMT_PISP_COMP2_GBRG: descr = "PiSP 8b GBGB/RGRG mode2 compr"; break; case V4L2_PIX_FMT_PISP_COMP2_BGGR: descr = "PiSP 8b BGBG/GRGR mode2 compr"; break; case V4L2_PIX_FMT_PISP_COMP2_MONO: descr = "PiSP 8b monochrome mode2 compr"; break; default: if (fmt->description[0]) return; WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat); flags = 0; snprintf(fmt->description, sz, "%p4cc", &fmt->pixelformat); break; } } if (fmt->type == V4L2_BUF_TYPE_META_CAPTURE) { switch (fmt->pixelformat) { case V4L2_META_FMT_GENERIC_8: case V4L2_META_FMT_GENERIC_CSI2_10: case V4L2_META_FMT_GENERIC_CSI2_12: case V4L2_META_FMT_GENERIC_CSI2_14: case V4L2_META_FMT_GENERIC_CSI2_16: case V4L2_META_FMT_GENERIC_CSI2_20: case V4L2_META_FMT_GENERIC_CSI2_24: fmt->flags |= V4L2_FMT_FLAG_META_LINE_BASED; break; default: fmt->flags &= ~V4L2_FMT_FLAG_META_LINE_BASED; } } if (descr) WARN_ON(strscpy(fmt->description, descr, sz) < 0); fmt->flags |= flags; } static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vdev = video_devdata(file); struct v4l2_fmtdesc *p = arg; int ret = check_fmt(file, p->type); u32 mbus_code; u32 cap_mask; if (ret) return ret; ret = -EINVAL; if (!(vdev->device_caps & V4L2_CAP_IO_MC)) p->mbus_code = 0; mbus_code = p->mbus_code; memset_after(p, 0, type); p->mbus_code = mbus_code; switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_M2M_MPLANE; if (!!(vdev->device_caps & cap_mask) != (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) break; if (unlikely(!ops->vidioc_enum_fmt_vid_cap)) break; ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: if (unlikely(!ops->vidioc_enum_fmt_vid_overlay)) break; ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: cap_mask = V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_VIDEO_M2M_MPLANE; if (!!(vdev->device_caps & cap_mask) != (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) break; if (unlikely(!ops->vidioc_enum_fmt_vid_out)) break; ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg); break; case V4L2_BUF_TYPE_SDR_CAPTURE: if (unlikely(!ops->vidioc_enum_fmt_sdr_cap)) break; ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg); break; case V4L2_BUF_TYPE_SDR_OUTPUT: if (unlikely(!ops->vidioc_enum_fmt_sdr_out)) break; ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg); break; case V4L2_BUF_TYPE_META_CAPTURE: if (unlikely(!ops->vidioc_enum_fmt_meta_cap)) break; ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg); break; case V4L2_BUF_TYPE_META_OUTPUT: if (unlikely(!ops->vidioc_enum_fmt_meta_out)) break; ret = ops->vidioc_enum_fmt_meta_out(file, fh, arg); break; } if (ret == 0) v4l_fill_fmtdesc(p); return ret; } static void v4l_pix_format_touch(struct v4l2_pix_format *p) { /* * The v4l2_pix_format structure contains fields that make no sense for * touch. Set them to default values in this case. 
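 * Touch devices report raw sensor data rather than images, so the field
 * order, colorspace and YCbCr/quantization/transfer-function metadata carry
 * no useful information and are reset below.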
*/ p->field = V4L2_FIELD_NONE; p->colorspace = V4L2_COLORSPACE_RAW; p->flags = 0; p->ycbcr_enc = 0; p->quantization = 0; p->xfer_func = 0; } static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; struct video_device *vfd = video_devdata(file); int ret = check_fmt(file, p->type); if (ret) return ret; memset(&p->fmt, 0, sizeof(p->fmt)); switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (unlikely(!ops->vidioc_g_fmt_vid_cap)) break; p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; if (vfd->vfl_type == VFL_TYPE_TOUCH) v4l_pix_format_touch(&p->fmt.pix); return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: return ops->vidioc_g_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: return ops->vidioc_g_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (unlikely(!ops->vidioc_g_fmt_vid_out)) break; p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; ret = ops->vidioc_g_fmt_vid_out(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: return ops->vidioc_g_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: return ops->vidioc_g_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: return ops->vidioc_g_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: return ops->vidioc_g_fmt_meta_cap(file, fh, arg); case V4L2_BUF_TYPE_META_OUTPUT: return ops->vidioc_g_fmt_meta_out(file, fh, arg); } return -EINVAL; } static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; struct video_device *vfd = video_devdata(file); int ret = check_fmt(file, p->type); unsigned int i; if (ret) return ret; ret = v4l_enable_media_source(vfd); if (ret) return ret; v4l_sanitize_format(p); switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (unlikely(!ops->vidioc_s_fmt_vid_cap)) break; memset_after(p, 0, fmt.pix); ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; if (vfd->vfl_type == VFL_TYPE_TOUCH) v4l_pix_format_touch(&p->fmt.pix); return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) break; memset_after(p, 0, fmt.pix_mp.xfer_func); for (i = 0; i < p->fmt.pix_mp.num_planes; i++) memset_after(&p->fmt.pix_mp.plane_fmt[i], 0, bytesperline); return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) break; memset_after(p, 0, fmt.win); p->fmt.win.clips = NULL; p->fmt.win.clipcount = 0; p->fmt.win.bitmap = NULL; return ops->vidioc_s_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) break; memset_after(p, 0, fmt.vbi.flags); return ops->vidioc_s_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: 
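	/*
	 * Same pattern as the other buffer types: bail out if the driver
	 * lacks the op, zero everything after fmt.sliced.io_size (only
	 * reserved words and union padding live there), then call into
	 * the driver.
	 */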
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) break; memset_after(p, 0, fmt.sliced.io_size); return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_vid_out)) break; memset_after(p, 0, fmt.pix); ret = ops->vidioc_s_fmt_vid_out(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) break; memset_after(p, 0, fmt.pix_mp.xfer_func); for (i = 0; i < p->fmt.pix_mp.num_planes; i++) memset_after(&p->fmt.pix_mp.plane_fmt[i], 0, bytesperline); return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) break; memset_after(p, 0, fmt.win); p->fmt.win.clips = NULL; p->fmt.win.clipcount = 0; p->fmt.win.bitmap = NULL; return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_vbi_out)) break; memset_after(p, 0, fmt.vbi.flags); return ops->vidioc_s_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) break; memset_after(p, 0, fmt.sliced.io_size); return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) break; memset_after(p, 0, fmt.sdr.buffersize); return ops->vidioc_s_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_sdr_out)) break; memset_after(p, 0, fmt.sdr.buffersize); return ops->vidioc_s_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: if (unlikely(!ops->vidioc_s_fmt_meta_cap)) break; memset_after(p, 0, fmt.meta); return ops->vidioc_s_fmt_meta_cap(file, fh, arg); case V4L2_BUF_TYPE_META_OUTPUT: if (unlikely(!ops->vidioc_s_fmt_meta_out)) break; memset_after(p, 0, fmt.meta); return ops->vidioc_s_fmt_meta_out(file, fh, arg); } return -EINVAL; } static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; struct video_device *vfd = video_devdata(file); int ret = check_fmt(file, p->type); unsigned int i; if (ret) return ret; v4l_sanitize_format(p); switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_vid_cap)) break; memset_after(p, 0, fmt.pix); ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; if (vfd->vfl_type == VFL_TYPE_TOUCH) v4l_pix_format_touch(&p->fmt.pix); return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) break; memset_after(p, 0, fmt.pix_mp.xfer_func); for (i = 0; i < p->fmt.pix_mp.num_planes; i++) memset_after(&p->fmt.pix_mp.plane_fmt[i], 0, bytesperline); return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) break; memset_after(p, 0, fmt.win); p->fmt.win.clips = NULL; p->fmt.win.clipcount = 0; p->fmt.win.bitmap = NULL; return ops->vidioc_try_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) break; memset_after(p, 0, fmt.vbi.flags); return ops->vidioc_try_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) break; memset_after(p, 0, fmt.sliced.io_size); return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, 
arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_vid_out)) break; memset_after(p, 0, fmt.pix); ret = ops->vidioc_try_fmt_vid_out(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) break; memset_after(p, 0, fmt.pix_mp.xfer_func); for (i = 0; i < p->fmt.pix_mp.num_planes; i++) memset_after(&p->fmt.pix_mp.plane_fmt[i], 0, bytesperline); return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) break; memset_after(p, 0, fmt.win); p->fmt.win.clips = NULL; p->fmt.win.clipcount = 0; p->fmt.win.bitmap = NULL; return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_vbi_out)) break; memset_after(p, 0, fmt.vbi.flags); return ops->vidioc_try_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) break; memset_after(p, 0, fmt.sliced.io_size); return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) break; memset_after(p, 0, fmt.sdr.buffersize); return ops->vidioc_try_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_sdr_out)) break; memset_after(p, 0, fmt.sdr.buffersize); return ops->vidioc_try_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: if (unlikely(!ops->vidioc_try_fmt_meta_cap)) break; memset_after(p, 0, fmt.meta); return ops->vidioc_try_fmt_meta_cap(file, fh, arg); case V4L2_BUF_TYPE_META_OUTPUT: if (unlikely(!ops->vidioc_try_fmt_meta_out)) break; memset_after(p, 0, fmt.meta); return ops->vidioc_try_fmt_meta_out(file, fh, arg); } return -EINVAL; } static int v4l_streamon(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { return ops->vidioc_streamon(file, fh, *(unsigned int *)arg); } static int v4l_streamoff(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg); } static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_tuner *p = arg; int err; p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; err = ops->vidioc_g_tuner(file, fh, p); if (!err) p->capability |= V4L2_TUNER_CAP_FREQ_BANDS; return err; } static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_tuner *p = arg; int ret; ret = v4l_enable_media_source(vfd); if (ret) return ret; p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; return ops->vidioc_s_tuner(file, fh, p); } static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_modulator *p = arg; int err; if (vfd->vfl_type == VFL_TYPE_RADIO) p->type = V4L2_TUNER_RADIO; err = ops->vidioc_g_modulator(file, fh, p); if (!err) p->capability |= V4L2_TUNER_CAP_FREQ_BANDS; return err; } static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_modulator *p = arg; if (vfd->vfl_type == VFL_TYPE_RADIO) p->type = V4L2_TUNER_RADIO; return ops->vidioc_s_modulator(file, fh, p); } static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_frequency *p = arg; if (vfd->vfl_type == VFL_TYPE_SDR) p->type = V4L2_TUNER_SDR; else p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; return ops->vidioc_g_frequency(file, fh, p); } static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); const struct v4l2_frequency *p = arg; enum v4l2_tuner_type type; int ret; ret = v4l_enable_media_source(vfd); if (ret) return ret; if (vfd->vfl_type == VFL_TYPE_SDR) { if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF) return -EINVAL; } else { type = (vfd->vfl_type == VFL_TYPE_RADIO) ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; if (type != p->type) return -EINVAL; } return ops->vidioc_s_frequency(file, fh, p); } static int v4l_enumstd(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_standard *p = arg; return v4l_video_std_enumstd(p, vfd->tvnorms); } static int v4l_s_std(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); v4l2_std_id id = *(v4l2_std_id *)arg, norm; int ret; ret = v4l_enable_media_source(vfd); if (ret) return ret; norm = id & vfd->tvnorms; if (vfd->tvnorms && !norm) /* Check if std is supported */ return -EINVAL; /* Calls the specific handler */ return ops->vidioc_s_std(file, fh, norm); } static int v4l_querystd(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); v4l2_std_id *p = arg; int ret; ret = v4l_enable_media_source(vfd); if (ret) return ret; /* * If no signal is detected, then the driver should return * V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with * any standards that do not apply removed. * * This means that tuners, audio and video decoders can join * their efforts to improve the standards detection. */ *p = vfd->tvnorms; return ops->vidioc_querystd(file, fh, arg); } static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_hw_freq_seek *p = arg; enum v4l2_tuner_type type; int ret; ret = v4l_enable_media_source(vfd); if (ret) return ret; /* s_hw_freq_seek is not supported for SDR for now */ if (vfd->vfl_type == VFL_TYPE_SDR) return -EINVAL; type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
	if (p->type != type)
		return -EINVAL;
	return ops->vidioc_s_hw_freq_seek(file, fh, p);
}

static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
		      void *fh, void *arg)
{
	struct v4l2_framebuffer *p = arg;

	p->base = NULL;
	return ops->vidioc_s_fbuf(file, fh, p);
}

static int v4l_overlay(const struct v4l2_ioctl_ops *ops, struct file *file,
		       void *fh, void *arg)
{
	return ops->vidioc_overlay(file, fh, *(unsigned int *)arg);
}

static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, struct file *file,
		       void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_requestbuffers *p = arg;
	int ret = check_fmt(file, p->type);

	if (ret)
		return ret;

	memset_after(p, 0, flags);

	p->capabilities = 0;
	if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
		p->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;

	return ops->vidioc_reqbufs(file, fh, p);
}

static int v4l_querybuf(const struct v4l2_ioctl_ops *ops, struct file *file,
			void *fh, void *arg)
{
	struct v4l2_buffer *p = arg;
	int ret = check_fmt(file, p->type);

	return ret ? ret : ops->vidioc_querybuf(file, fh, p);
}

static int v4l_qbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
		    void *fh, void *arg)
{
	struct v4l2_buffer *p = arg;
	int ret = check_fmt(file, p->type);

	return ret ? ret : ops->vidioc_qbuf(file, fh, p);
}

static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
		     void *fh, void *arg)
{
	struct v4l2_buffer *p = arg;
	int ret = check_fmt(file, p->type);

	return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
}

static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops, struct file *file,
			   void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_create_buffers *create = arg;
	int ret = check_fmt(file, create->format.type);

	if (ret)
		return ret;

	memset_after(create, 0, flags);

	v4l_sanitize_format(&create->format);

	create->capabilities = 0;
	if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
		create->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;

	ret = ops->vidioc_create_bufs(file, fh, create);

	if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		create->format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;

	return ret;
}

static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops, struct file *file,
			   void *fh, void *arg)
{
	struct v4l2_buffer *b = arg;
	int ret = check_fmt(file, b->type);

	return ret ?
ret : ops->vidioc_prepare_buf(file, fh, b); } static int v4l_remove_bufs(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_remove_buffers *remove = arg; if (ops->vidioc_remove_bufs) return ops->vidioc_remove_bufs(file, fh, remove); return -ENOTTY; } static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_streamparm *p = arg; v4l2_std_id std; int ret = check_fmt(file, p->type); if (ret) return ret; if (ops->vidioc_g_parm) return ops->vidioc_g_parm(file, fh, p); if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; if (vfd->device_caps & V4L2_CAP_READWRITE) p->parm.capture.readbuffers = 2; ret = ops->vidioc_g_std(file, fh, &std); if (ret == 0) v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe); return ret; } static int v4l_s_parm(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_streamparm *p = arg; int ret = check_fmt(file, p->type); if (ret) return ret; /* Note: extendedmode is never used in drivers */ if (V4L2_TYPE_IS_OUTPUT(p->type)) { memset(p->parm.output.reserved, 0, sizeof(p->parm.output.reserved)); p->parm.output.extendedmode = 0; p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY; } else { memset(p->parm.capture.reserved, 0, sizeof(p->parm.capture.reserved)); p->parm.capture.extendedmode = 0; p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY; } return ops->vidioc_s_parm(file, fh, p); } static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_queryctrl *p = arg; struct v4l2_fh *vfh = test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; if (vfh && vfh->ctrl_handler) return v4l2_queryctrl(vfh->ctrl_handler, p); if (vfd->ctrl_handler) return v4l2_queryctrl(vfd->ctrl_handler, p); if (ops->vidioc_queryctrl) return ops->vidioc_queryctrl(file, fh, p); return -ENOTTY; } static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_query_ext_ctrl *p = arg; struct v4l2_fh *vfh = test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; if (vfh && vfh->ctrl_handler) return v4l2_query_ext_ctrl(vfh->ctrl_handler, p); if (vfd->ctrl_handler) return v4l2_query_ext_ctrl(vfd->ctrl_handler, p); if (ops->vidioc_query_ext_ctrl) return ops->vidioc_query_ext_ctrl(file, fh, p); return -ENOTTY; } static int v4l_querymenu(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_querymenu *p = arg; struct v4l2_fh *vfh = test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; if (vfh && vfh->ctrl_handler) return v4l2_querymenu(vfh->ctrl_handler, p); if (vfd->ctrl_handler) return v4l2_querymenu(vfd->ctrl_handler, p); if (ops->vidioc_querymenu) return ops->vidioc_querymenu(file, fh, p); return -ENOTTY; } static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_control *p = arg; struct v4l2_fh *vfh = test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? 
fh : NULL; struct v4l2_ext_controls ctrls; struct v4l2_ext_control ctrl; if (vfh && vfh->ctrl_handler) return v4l2_g_ctrl(vfh->ctrl_handler, p); if (vfd->ctrl_handler) return v4l2_g_ctrl(vfd->ctrl_handler, p); if (ops->vidioc_g_ctrl) return ops->vidioc_g_ctrl(file, fh, p); if (ops->vidioc_g_ext_ctrls == NULL) return -ENOTTY; ctrls.which = V4L2_CTRL_ID2WHICH(p->id); ctrls.count = 1; ctrls.controls = &ctrl; ctrl.id = p->id; ctrl.value = p->value; if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) { int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls); if (ret == 0) p->value = ctrl.value; return ret; } return -EINVAL; } static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_control *p = arg; struct v4l2_fh *vfh = test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; struct v4l2_ext_controls ctrls; struct v4l2_ext_control ctrl; int ret; if (vfh && vfh->ctrl_handler) return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p); if (vfd->ctrl_handler) return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p); if (ops->vidioc_s_ctrl) return ops->vidioc_s_ctrl(file, fh, p); if (ops->vidioc_s_ext_ctrls == NULL) return -ENOTTY; ctrls.which = V4L2_CTRL_ID2WHICH(p->id); ctrls.count = 1; ctrls.controls = &ctrl; ctrl.id = p->id; ctrl.value = p->value; if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL)) return -EINVAL; ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls); p->value = ctrl.value; return ret; } static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_ext_controls *p = arg; struct v4l2_fh *vfh = test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; p->error_idx = p->count; if (vfh && vfh->ctrl_handler) return v4l2_g_ext_ctrls(vfh->ctrl_handler, vfd, vfd->v4l2_dev->mdev, p); if (vfd->ctrl_handler) return v4l2_g_ext_ctrls(vfd->ctrl_handler, vfd, vfd->v4l2_dev->mdev, p); if (ops->vidioc_g_ext_ctrls == NULL) return -ENOTTY; return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ? ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL; } static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_ext_controls *p = arg; struct v4l2_fh *vfh = test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; p->error_idx = p->count; if (vfh && vfh->ctrl_handler) return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vfd, vfd->v4l2_dev->mdev, p); if (vfd->ctrl_handler) return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, vfd, vfd->v4l2_dev->mdev, p); if (ops->vidioc_s_ext_ctrls == NULL) return -ENOTTY; return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ? ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL; } static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_ext_controls *p = arg; struct v4l2_fh *vfh = test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL; p->error_idx = p->count; if (vfh && vfh->ctrl_handler) return v4l2_try_ext_ctrls(vfh->ctrl_handler, vfd, vfd->v4l2_dev->mdev, p); if (vfd->ctrl_handler) return v4l2_try_ext_ctrls(vfd->ctrl_handler, vfd, vfd->v4l2_dev->mdev, p); if (ops->vidioc_try_ext_ctrls == NULL) return -ENOTTY; return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ? ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL; } /* * The selection API specified originally that the _MPLANE buffer types * shouldn't be used. 
The reasons for this are lost in the mists of time * (or just really crappy memories). Regardless, this is really annoying * for userspace. So to keep things simple we map _MPLANE buffer types * to their 'regular' counterparts before calling the driver. And we * restore it afterwards. This way applications can use either buffer * type and drivers don't need to check for both. */ static int v4l_g_selection(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_selection *p = arg; u32 old_type = p->type; int ret; if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ret = ops->vidioc_g_selection(file, fh, p); p->type = old_type; return ret; } static int v4l_s_selection(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_selection *p = arg; u32 old_type = p->type; int ret; if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ret = ops->vidioc_s_selection(file, fh, p); p->type = old_type; return ret; } static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_crop *p = arg; struct v4l2_selection s = { .type = p->type, }; int ret; /* simulate capture crop using selection api */ /* crop means compose for output devices */ if (V4L2_TYPE_IS_OUTPUT(p->type)) s.target = V4L2_SEL_TGT_COMPOSE; else s.target = V4L2_SEL_TGT_CROP; if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) s.target = s.target == V4L2_SEL_TGT_COMPOSE ? V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE; ret = v4l_g_selection(ops, file, fh, &s); /* copying results to old structure on success */ if (!ret) p->c = s.r; return ret; } static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_crop *p = arg; struct v4l2_selection s = { .type = p->type, .r = p->c, }; /* simulate capture crop using selection api */ /* crop means compose for output devices */ if (V4L2_TYPE_IS_OUTPUT(p->type)) s.target = V4L2_SEL_TGT_COMPOSE; else s.target = V4L2_SEL_TGT_CROP; if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) s.target = s.target == V4L2_SEL_TGT_COMPOSE ? V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE; return v4l_s_selection(ops, file, fh, &s); } static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_cropcap *p = arg; struct v4l2_selection s = { .type = p->type }; int ret = 0; /* setting trivial pixelaspect */ p->pixelaspect.numerator = 1; p->pixelaspect.denominator = 1; if (s.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) s.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; else if (s.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) s.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; /* * The determine_valid_ioctls() call already should ensure * that this can never happen, but just in case... */ if (WARN_ON(!ops->vidioc_g_selection)) return -ENOTTY; if (ops->vidioc_g_pixelaspect) ret = ops->vidioc_g_pixelaspect(file, fh, s.type, &p->pixelaspect); /* * Ignore ENOTTY or ENOIOCTLCMD error returns, just use the * square pixel aspect ratio in that case. 
*/ if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD) return ret; /* Use g_selection() to fill in the bounds and defrect rectangles */ /* obtaining bounds */ if (V4L2_TYPE_IS_OUTPUT(p->type)) s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS; else s.target = V4L2_SEL_TGT_CROP_BOUNDS; if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags)) s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ? V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS; ret = v4l_g_selection(ops, file, fh, &s); if (ret) return ret; p->bounds = s.r; /* obtaining defrect */ if (s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS) s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT; else s.target = V4L2_SEL_TGT_CROP_DEFAULT; ret = v4l_g_selection(ops, file, fh, &s); if (ret) return ret; p->defrect = s.r; return 0; } static int v4l_log_status(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); int ret; if (vfd->v4l2_dev) pr_info("%s: ================= START STATUS =================\n", vfd->v4l2_dev->name); ret = ops->vidioc_log_status(file, fh); if (vfd->v4l2_dev) pr_info("%s: ================== END STATUS ==================\n", vfd->v4l2_dev->name); return ret; } static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { #ifdef CONFIG_VIDEO_ADV_DEBUG struct v4l2_dbg_register *p = arg; struct video_device *vfd = video_devdata(file); struct v4l2_subdev *sd; int idx = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) { if (vfd->v4l2_dev == NULL) return -EINVAL; v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) if (p->match.addr == idx++) return v4l2_subdev_call(sd, core, g_register, p); return -EINVAL; } if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE && (ops->vidioc_g_chip_info || p->match.addr == 0)) return ops->vidioc_g_register(file, fh, p); return -EINVAL; #else return -ENOTTY; #endif } static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { #ifdef CONFIG_VIDEO_ADV_DEBUG const struct v4l2_dbg_register *p = arg; struct video_device *vfd = video_devdata(file); struct v4l2_subdev *sd; int idx = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) { if (vfd->v4l2_dev == NULL) return -EINVAL; v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) if (p->match.addr == idx++) return v4l2_subdev_call(sd, core, s_register, p); return -EINVAL; } if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE && (ops->vidioc_g_chip_info || p->match.addr == 0)) return ops->vidioc_s_register(file, fh, p); return -EINVAL; #else return -ENOTTY; #endif } static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { #ifdef CONFIG_VIDEO_ADV_DEBUG struct video_device *vfd = video_devdata(file); struct v4l2_dbg_chip_info *p = arg; struct v4l2_subdev *sd; int idx = 0; switch (p->match.type) { case V4L2_CHIP_MATCH_BRIDGE: if (ops->vidioc_s_register) p->flags |= V4L2_CHIP_FL_WRITABLE; if (ops->vidioc_g_register) p->flags |= V4L2_CHIP_FL_READABLE; strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name)); if (ops->vidioc_g_chip_info) return ops->vidioc_g_chip_info(file, fh, arg); if (p->match.addr) return -EINVAL; return 0; case V4L2_CHIP_MATCH_SUBDEV: if (vfd->v4l2_dev == NULL) break; v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) { if (p->match.addr != idx++) continue; if (sd->ops->core && sd->ops->core->s_register) p->flags |= V4L2_CHIP_FL_WRITABLE; if (sd->ops->core 
			    && sd->ops->core->g_register)
				p->flags |= V4L2_CHIP_FL_READABLE;
			strscpy(p->name, sd->name, sizeof(p->name));
			return 0;
		}
		break;
	}
	return -EINVAL;
#else
	return -ENOTTY;
#endif
}

static int v4l_dqevent(const struct v4l2_ioctl_ops *ops, struct file *file,
		       void *fh, void *arg)
{
	return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
}

static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
			       struct file *file, void *fh, void *arg)
{
	return ops->vidioc_subscribe_event(fh, arg);
}

static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
				 struct file *file, void *fh, void *arg)
{
	return ops->vidioc_unsubscribe_event(fh, arg);
}

static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
				struct file *file, void *fh, void *arg)
{
	struct v4l2_sliced_vbi_cap *p = arg;
	int ret = check_fmt(file, p->type);

	if (ret)
		return ret;

	/* Clear up to type, everything after type is zeroed already */
	memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));

	return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
}

static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
			       struct file *file, void *fh, void *arg)
{
	struct video_device *vfd = video_devdata(file);
	struct v4l2_frequency_band *p = arg;
	enum v4l2_tuner_type type;
	int err;

	if (vfd->vfl_type == VFL_TYPE_SDR) {
		if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
			return -EINVAL;
		type = p->type;
	} else {
		type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
				V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
		if (type != p->type)
			return -EINVAL;
	}
	if (ops->vidioc_enum_freq_bands) {
		err = ops->vidioc_enum_freq_bands(file, fh, p);
		if (err != -ENOTTY)
			return err;
	}
	if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) {
		struct v4l2_tuner t = {
			.index = p->tuner,
			.type = type,
		};

		if (p->index)
			return -EINVAL;
		err = ops->vidioc_g_tuner(file, fh, &t);
		if (err)
			return err;
		p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
		p->rangelow = t.rangelow;
		p->rangehigh = t.rangehigh;
		p->modulation = (type == V4L2_TUNER_RADIO) ?
			V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
		return 0;
	}
	if (is_valid_ioctl(vfd, VIDIOC_G_MODULATOR)) {
		struct v4l2_modulator m = {
			.index = p->tuner,
		};

		if (type != V4L2_TUNER_RADIO)
			return -EINVAL;
		if (p->index)
			return -EINVAL;
		err = ops->vidioc_g_modulator(file, fh, &m);
		if (err)
			return err;
		p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
		p->rangelow = m.rangelow;
		p->rangehigh = m.rangehigh;
		p->modulation = (type == V4L2_TUNER_RADIO) ?
			V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
		return 0;
	}
	return -ENOTTY;
}

struct v4l2_ioctl_info {
	unsigned int ioctl;
	u32 flags;
	const char * const name;
	int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file,
		    void *fh, void *p);
	void (*debug)(const void *arg, bool write_only);
};

/* This control needs a priority check */
#define INFO_FL_PRIO		(1 << 0)
/* This control can be valid if the filehandle passes a control handler.
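 * The control ioctl helpers above (v4l_queryctrl, v4l_g_ctrl, v4l_s_ctrl,
 * ...) therefore try the filehandle's ctrl_handler first, then the
 * video_device's ctrl_handler, and only fall back to the driver's legacy
 * vidioc_* control ops as a last resort.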
 */
#define INFO_FL_CTRL		(1 << 1)
/* Queuing ioctl */
#define INFO_FL_QUEUE		(1 << 2)
/* Always copy back result, even on error */
#define INFO_FL_ALWAYS_COPY	(1 << 3)
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field)			\
	((offsetof(struct v4l2_struct, field) +			\
	  sizeof_field(struct v4l2_struct, field)) << 16)
#define INFO_FL_CLEAR_MASK	(_IOC_SIZEMASK << 16)

#define DEFINE_V4L_STUB_FUNC(_vidioc)				\
	static int v4l_stub_ ## _vidioc(			\
			const struct v4l2_ioctl_ops *ops,	\
			struct file *file, void *fh, void *p)	\
	{							\
		return ops->vidioc_ ## _vidioc(file, fh, p);	\
	}

#define IOCTL_INFO(_ioctl, _func, _debug, _flags)		\
	[_IOC_NR(_ioctl)] = {					\
		.ioctl = _ioctl,				\
		.flags = _flags,				\
		.name = #_ioctl,				\
		.func = _func,					\
		.debug = _debug,				\
	}

DEFINE_V4L_STUB_FUNC(g_fbuf)
DEFINE_V4L_STUB_FUNC(expbuf)
DEFINE_V4L_STUB_FUNC(g_std)
DEFINE_V4L_STUB_FUNC(g_audio)
DEFINE_V4L_STUB_FUNC(s_audio)
DEFINE_V4L_STUB_FUNC(g_edid)
DEFINE_V4L_STUB_FUNC(s_edid)
DEFINE_V4L_STUB_FUNC(g_audout)
DEFINE_V4L_STUB_FUNC(s_audout)
DEFINE_V4L_STUB_FUNC(g_jpegcomp)
DEFINE_V4L_STUB_FUNC(s_jpegcomp)
DEFINE_V4L_STUB_FUNC(enumaudio)
DEFINE_V4L_STUB_FUNC(enumaudout)
DEFINE_V4L_STUB_FUNC(enum_framesizes)
DEFINE_V4L_STUB_FUNC(enum_frameintervals)
DEFINE_V4L_STUB_FUNC(g_enc_index)
DEFINE_V4L_STUB_FUNC(encoder_cmd)
DEFINE_V4L_STUB_FUNC(try_encoder_cmd)
DEFINE_V4L_STUB_FUNC(decoder_cmd)
DEFINE_V4L_STUB_FUNC(try_decoder_cmd)
DEFINE_V4L_STUB_FUNC(s_dv_timings)
DEFINE_V4L_STUB_FUNC(g_dv_timings)
DEFINE_V4L_STUB_FUNC(enum_dv_timings)
DEFINE_V4L_STUB_FUNC(query_dv_timings)
DEFINE_V4L_STUB_FUNC(dv_timings_cap)

static const struct v4l2_ioctl_info v4l2_ioctls[] = {
	IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
	IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, 0),
	IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0),
	IOCTL_INFO(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
	IOCTL_INFO(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers,
		   INFO_FL_PRIO | INFO_FL_QUEUE),
	IOCTL_INFO(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer,
		   INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
	IOCTL_INFO(VIDIOC_G_FBUF, v4l_stub_g_fbuf, v4l_print_framebuffer, 0),
	IOCTL_INFO(VIDIOC_S_FBUF, v4l_s_fbuf, v4l_print_framebuffer,
		   INFO_FL_PRIO),
	IOCTL_INFO(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
	IOCTL_INFO(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
	IOCTL_INFO(VIDIOC_EXPBUF, v4l_stub_expbuf, v4l_print_exportbuffer,
		   INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
	IOCTL_INFO(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
	IOCTL_INFO(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype,
		   INFO_FL_PRIO | INFO_FL_QUEUE),
	IOCTL_INFO(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype,
		   INFO_FL_PRIO | INFO_FL_QUEUE),
	IOCTL_INFO(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm,
		   INFO_FL_CLEAR(v4l2_streamparm, type)),
	IOCTL_INFO(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm,
		   INFO_FL_PRIO),
	IOCTL_INFO(VIDIOC_G_STD, v4l_stub_g_std, v4l_print_std, 0),
	IOCTL_INFO(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
	IOCTL_INFO(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard,
		   INFO_FL_CLEAR(v4l2_standard, index)),
	IOCTL_INFO(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput,
		   INFO_FL_CLEAR(v4l2_input, index)),
	IOCTL_INFO(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control,
		   INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)),
	IOCTL_INFO(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control,
		   INFO_FL_PRIO | INFO_FL_CTRL),
	IOCTL_INFO(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner,
INFO_FL_CLEAR(v4l2_tuner, index)), IOCTL_INFO(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_G_AUDIO, v4l_stub_g_audio, v4l_print_audio, 0), IOCTL_INFO(VIDIOC_S_AUDIO, v4l_stub_s_audio, v4l_print_audio, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)), IOCTL_INFO(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)), IOCTL_INFO(VIDIOC_G_INPUT, v4l_g_input, v4l_print_u32, 0), IOCTL_INFO(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_G_EDID, v4l_stub_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY), IOCTL_INFO(VIDIOC_S_EDID, v4l_stub_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY), IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_g_output, v4l_print_u32, 0), IOCTL_INFO(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)), IOCTL_INFO(VIDIOC_G_AUDOUT, v4l_stub_g_audout, v4l_print_audioout, 0), IOCTL_INFO(VIDIOC_S_AUDOUT, v4l_stub_s_audout, v4l_print_audioout, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)), IOCTL_INFO(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)), IOCTL_INFO(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)), IOCTL_INFO(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)), IOCTL_INFO(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)), IOCTL_INFO(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)), IOCTL_INFO(VIDIOC_G_JPEGCOMP, v4l_stub_g_jpegcomp, v4l_print_jpegcompression, 0), IOCTL_INFO(VIDIOC_S_JPEGCOMP, v4l_stub_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0), IOCTL_INFO(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0), IOCTL_INFO(VIDIOC_ENUMAUDIO, v4l_stub_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)), IOCTL_INFO(VIDIOC_ENUMAUDOUT, v4l_stub_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)), IOCTL_INFO(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0), IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)), IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0), IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY), IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)), IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)), IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, 
v4l_print_enc_idx, 0), IOCTL_INFO(VIDIOC_ENCODER_CMD, v4l_stub_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, v4l_stub_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)), IOCTL_INFO(VIDIOC_DECODER_CMD, v4l_stub_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, v4l_stub_try_decoder_cmd, v4l_print_decoder_cmd, 0), IOCTL_INFO(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0), IOCTL_INFO(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0), IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO), IOCTL_INFO(VIDIOC_S_DV_TIMINGS, v4l_stub_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)), IOCTL_INFO(VIDIOC_G_DV_TIMINGS, v4l_stub_g_dv_timings, v4l_print_dv_timings, 0), IOCTL_INFO(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0), IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0), IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0), IOCTL_INFO(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE), IOCTL_INFO(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE), IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, v4l_stub_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)), IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, v4l_stub_query_dv_timings, v4l_print_dv_timings, INFO_FL_ALWAYS_COPY), IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, v4l_stub_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, pad)), IOCTL_INFO(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0), IOCTL_INFO(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)), IOCTL_INFO(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)), IOCTL_INFO(VIDIOC_REMOVE_BUFS, v4l_remove_bufs, v4l_print_remove_buffers, INFO_FL_PRIO | INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_remove_buffers, type)), }; #define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) static bool v4l2_is_known_ioctl(unsigned int cmd) { if (_IOC_NR(cmd) >= V4L2_IOCTLS) return false; return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd; } static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, struct v4l2_fh *vfh, unsigned int cmd, void *arg) { if (_IOC_NR(cmd) >= V4L2_IOCTLS) return vdev->lock; if (vfh && vfh->m2m_ctx && (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) { if (vfh->m2m_ctx->q_lock) return vfh->m2m_ctx->q_lock; } if (vdev->queue && vdev->queue->lock && (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) return vdev->queue->lock; return vdev->lock; } /* Common ioctl debug function. 
This function can be used by external ioctl messages as well as internal V4L ioctl */ void v4l_printk_ioctl(const char *prefix, unsigned int cmd) { const char *dir, *type; if (prefix) printk(KERN_DEBUG "%s: ", prefix); switch (_IOC_TYPE(cmd)) { case 'd': type = "v4l2_int"; break; case 'V': if (!v4l2_is_known_ioctl(cmd)) { type = "v4l2"; break; } pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name); return; default: type = "unknown"; break; } switch (_IOC_DIR(cmd)) { case _IOC_NONE: dir = "--"; break; case _IOC_READ: dir = "r-"; break; case _IOC_WRITE: dir = "-w"; break; case _IOC_READ | _IOC_WRITE: dir = "rw"; break; default: dir = "*ERR*"; break; } pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)", type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd); } EXPORT_SYMBOL(v4l_printk_ioctl); static long __video_do_ioctl(struct file *file, unsigned int cmd, void *arg) { struct video_device *vfd = video_devdata(file); struct mutex *req_queue_lock = NULL; struct mutex *lock; /* ioctl serialization mutex */ const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; bool write_only = false; struct v4l2_ioctl_info default_info; const struct v4l2_ioctl_info *info; void *fh = file->private_data; struct v4l2_fh *vfh = NULL; int dev_debug = vfd->dev_debug; long ret = -ENOTTY; if (ops == NULL) { pr_warn("%s: has no ioctl_ops.\n", video_device_node_name(vfd)); return ret; } if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) vfh = file->private_data; /* * We need to serialize streamon/off with queueing new requests. * These ioctls may trigger the cancellation of a streaming * operation, and that should not be mixed with queueing a new * request at the same time. */ if (v4l2_device_supports_requests(vfd->v4l2_dev) && (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) { req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex; if (mutex_lock_interruptible(req_queue_lock)) return -ERESTARTSYS; } lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg); if (lock && mutex_lock_interruptible(lock)) { if (req_queue_lock) mutex_unlock(req_queue_lock); return -ERESTARTSYS; } if (!video_is_registered(vfd)) { ret = -ENODEV; goto unlock; } if (v4l2_is_known_ioctl(cmd)) { info = &v4l2_ioctls[_IOC_NR(cmd)]; if (!is_valid_ioctl(vfd, cmd) && !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler)) goto done; if (vfh && (info->flags & INFO_FL_PRIO)) { ret = v4l2_prio_check(vfd->prio, vfh->prio); if (ret) goto done; } } else { default_info.ioctl = cmd; default_info.flags = 0; default_info.debug = v4l_print_default; info = &default_info; } write_only = _IOC_DIR(cmd) == _IOC_WRITE; if (info != &default_info) { ret = info->func(ops, file, fh, arg); } else if (!ops->vidioc_default) { ret = -ENOTTY; } else { ret = ops->vidioc_default(file, fh, vfh ? 
v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0, cmd, arg); } done: if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) { if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) && (cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF)) goto unlock; v4l_printk_ioctl(video_device_node_name(vfd), cmd); if (ret < 0) pr_cont(": error %ld", ret); if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG)) pr_cont("\n"); else if (_IOC_DIR(cmd) == _IOC_NONE) info->debug(arg, write_only); else { pr_cont(": "); info->debug(arg, write_only); } } unlock: if (lock) mutex_unlock(lock); if (req_queue_lock) mutex_unlock(req_queue_lock); return ret; } static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, void __user **user_ptr, void ***kernel_ptr) { int ret = 0; switch (cmd) { case VIDIOC_PREPARE_BUF: case VIDIOC_QUERYBUF: case VIDIOC_QBUF: case VIDIOC_DQBUF: { struct v4l2_buffer *buf = parg; if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) { if (buf->length > VIDEO_MAX_PLANES) { ret = -EINVAL; break; } *user_ptr = (void __user *)buf->m.planes; *kernel_ptr = (void **)&buf->m.planes; *array_size = sizeof(struct v4l2_plane) * buf->length; ret = 1; } break; } case VIDIOC_G_EDID: case VIDIOC_S_EDID: { struct v4l2_edid *edid = parg; if (edid->blocks) { if (edid->blocks > 256) { ret = -EINVAL; break; } *user_ptr = (void __user *)edid->edid; *kernel_ptr = (void **)&edid->edid; *array_size = edid->blocks * 128; ret = 1; } break; } case VIDIOC_S_EXT_CTRLS: case VIDIOC_G_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: { struct v4l2_ext_controls *ctrls = parg; if (ctrls->count != 0) { if (ctrls->count > V4L2_CID_MAX_CTRLS) { ret = -EINVAL; break; } *user_ptr = (void __user *)ctrls->controls; *kernel_ptr = (void **)&ctrls->controls; *array_size = sizeof(struct v4l2_ext_control) * ctrls->count; ret = 1; } break; } case VIDIOC_SUBDEV_G_ROUTING: case VIDIOC_SUBDEV_S_ROUTING: { struct v4l2_subdev_routing *routing = parg; if (routing->len_routes > 256) return -E2BIG; *user_ptr = u64_to_user_ptr(routing->routes); *kernel_ptr = (void **)&routing->routes; *array_size = sizeof(struct v4l2_subdev_route) * routing->len_routes; ret = 1; break; } } return ret; } static unsigned int video_translate_cmd(unsigned int cmd) { #if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) switch (cmd) { case VIDIOC_DQEVENT_TIME32: return VIDIOC_DQEVENT; case VIDIOC_QUERYBUF_TIME32: return VIDIOC_QUERYBUF; case VIDIOC_QBUF_TIME32: return VIDIOC_QBUF; case VIDIOC_DQBUF_TIME32: return VIDIOC_DQBUF; case VIDIOC_PREPARE_BUF_TIME32: return VIDIOC_PREPARE_BUF; } #endif if (in_compat_syscall()) return v4l2_compat_translate_cmd(cmd); return cmd; } static int video_get_user(void __user *arg, void *parg, unsigned int real_cmd, unsigned int cmd, bool *always_copy) { unsigned int n = _IOC_SIZE(real_cmd); int err = 0; if (!(_IOC_DIR(cmd) & _IOC_WRITE)) { /* read-only ioctl */ memset(parg, 0, n); return 0; } /* * In some cases, only a few fields are used as input, * i.e. when the app sets "index" and then the driver * fills in the rest of the structure for the thing * with that index. We only need to copy up the first * non-input field. 
*/ if (v4l2_is_known_ioctl(real_cmd)) { u32 flags = v4l2_ioctls[_IOC_NR(real_cmd)].flags; if (flags & INFO_FL_CLEAR_MASK) n = (flags & INFO_FL_CLEAR_MASK) >> 16; *always_copy = flags & INFO_FL_ALWAYS_COPY; } if (cmd == real_cmd) { if (copy_from_user(parg, (void __user *)arg, n)) err = -EFAULT; } else if (in_compat_syscall()) { memset(parg, 0, n); err = v4l2_compat_get_user(arg, parg, cmd); } else { memset(parg, 0, n); #if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) switch (cmd) { case VIDIOC_QUERYBUF_TIME32: case VIDIOC_QBUF_TIME32: case VIDIOC_DQBUF_TIME32: case VIDIOC_PREPARE_BUF_TIME32: { struct v4l2_buffer_time32 vb32; struct v4l2_buffer *vb = parg; if (copy_from_user(&vb32, arg, sizeof(vb32))) return -EFAULT; *vb = (struct v4l2_buffer) { .index = vb32.index, .type = vb32.type, .bytesused = vb32.bytesused, .flags = vb32.flags, .field = vb32.field, .timestamp.tv_sec = vb32.timestamp.tv_sec, .timestamp.tv_usec = vb32.timestamp.tv_usec, .timecode = vb32.timecode, .sequence = vb32.sequence, .memory = vb32.memory, .m.userptr = vb32.m.userptr, .length = vb32.length, .request_fd = vb32.request_fd, }; break; } } #endif } /* zero out anything we don't copy from userspace */ if (!err && n < _IOC_SIZE(real_cmd)) memset((u8 *)parg + n, 0, _IOC_SIZE(real_cmd) - n); return err; } static int video_put_user(void __user *arg, void *parg, unsigned int real_cmd, unsigned int cmd) { if (!(_IOC_DIR(cmd) & _IOC_READ)) return 0; if (cmd == real_cmd) { /* Copy results into user buffer */ if (copy_to_user(arg, parg, _IOC_SIZE(cmd))) return -EFAULT; return 0; } if (in_compat_syscall()) return v4l2_compat_put_user(arg, parg, cmd); #if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME) switch (cmd) { case VIDIOC_DQEVENT_TIME32: { struct v4l2_event *ev = parg; struct v4l2_event_time32 ev32; memset(&ev32, 0, sizeof(ev32)); ev32.type = ev->type; ev32.pending = ev->pending; ev32.sequence = ev->sequence; ev32.timestamp.tv_sec = ev->timestamp.tv_sec; ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec; ev32.id = ev->id; memcpy(&ev32.u, &ev->u, sizeof(ev->u)); memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved)); if (copy_to_user(arg, &ev32, sizeof(ev32))) return -EFAULT; break; } case VIDIOC_QUERYBUF_TIME32: case VIDIOC_QBUF_TIME32: case VIDIOC_DQBUF_TIME32: case VIDIOC_PREPARE_BUF_TIME32: { struct v4l2_buffer *vb = parg; struct v4l2_buffer_time32 vb32; memset(&vb32, 0, sizeof(vb32)); vb32.index = vb->index; vb32.type = vb->type; vb32.bytesused = vb->bytesused; vb32.flags = vb->flags; vb32.field = vb->field; vb32.timestamp.tv_sec = vb->timestamp.tv_sec; vb32.timestamp.tv_usec = vb->timestamp.tv_usec; vb32.timecode = vb->timecode; vb32.sequence = vb->sequence; vb32.memory = vb->memory; vb32.m.userptr = vb->m.userptr; vb32.length = vb->length; vb32.request_fd = vb->request_fd; if (copy_to_user(arg, &vb32, sizeof(vb32))) return -EFAULT; break; } } #endif return 0; } long video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg, v4l2_kioctl func) { char sbuf[128]; void *mbuf = NULL, *array_buf = NULL; void *parg = (void *)arg; long err = -EINVAL; bool has_array_args; bool always_copy = false; size_t array_size = 0; void __user *user_ptr = NULL; void **kernel_ptr = NULL; unsigned int cmd = video_translate_cmd(orig_cmd); const size_t ioc_size = _IOC_SIZE(cmd); /* Copy arguments into temp kernel buffer */ if (_IOC_DIR(cmd) != _IOC_NONE) { if (ioc_size <= sizeof(sbuf)) { parg = sbuf; } else { /* too big to allocate from stack */ mbuf = kmalloc(ioc_size, GFP_KERNEL); if (NULL == 
mbuf) return -ENOMEM; parg = mbuf; } err = video_get_user((void __user *)arg, parg, cmd, orig_cmd, &always_copy); if (err) goto out; } err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr); if (err < 0) goto out; has_array_args = err; if (has_array_args) { array_buf = kvmalloc(array_size, GFP_KERNEL); err = -ENOMEM; if (array_buf == NULL) goto out; if (in_compat_syscall()) err = v4l2_compat_get_array_args(file, array_buf, user_ptr, array_size, orig_cmd, parg); else err = copy_from_user(array_buf, user_ptr, array_size) ? -EFAULT : 0; if (err) goto out; *kernel_ptr = array_buf; } /* Handles IOCTL */ err = func(file, cmd, parg); if (err == -ENOTTY || err == -ENOIOCTLCMD) { err = -ENOTTY; goto out; } if (err == 0) { if (cmd == VIDIOC_DQBUF) trace_v4l2_dqbuf(video_devdata(file)->minor, parg); else if (cmd == VIDIOC_QBUF) trace_v4l2_qbuf(video_devdata(file)->minor, parg); } /* * Some ioctls can return an error, but still have valid * results that must be returned. * * FIXME: subdev IOCTLS are partially handled here and partially in * v4l2-subdev.c and the 'always_copy' flag can only be set for IOCTLS * defined here as part of the 'v4l2_ioctls' array. As * VIDIOC_SUBDEV_[GS]_ROUTING needs to return results to applications * even in case of failure, but it is not defined here as part of the * 'v4l2_ioctls' array, insert an ad-hoc check to address that. */ if (cmd == VIDIOC_SUBDEV_G_ROUTING || cmd == VIDIOC_SUBDEV_S_ROUTING) always_copy = true; if (err < 0 && !always_copy) goto out; if (has_array_args) { *kernel_ptr = (void __force *)user_ptr; if (in_compat_syscall()) { int put_err; put_err = v4l2_compat_put_array_args(file, user_ptr, array_buf, array_size, orig_cmd, parg); if (put_err) err = put_err; } else if (copy_to_user(user_ptr, array_buf, array_size)) { err = -EFAULT; } } if (video_put_user((void __user *)arg, parg, cmd, orig_cmd)) err = -EFAULT; out: kvfree(array_buf); kfree(mbuf); return err; } long video_ioctl2(struct file *file, unsigned int cmd, unsigned long arg) { return video_usercopy(file, cmd, arg, __video_do_ioctl); } EXPORT_SYMBOL(video_ioctl2);
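A note on the INFO_FL_CLEAR() encoding used throughout the ioctl table above: it stores, in the upper bits of the flags word, how many leading bytes of the argument structure are real input, and video_get_user() copies only that much from userspace before zeroing the rest. The sketch below reproduces that arithmetic in userspace with a stand-in struct and macros (tuner_like, FL_CLEAR and the 0x3fff mask value are illustrative assumptions, not the kernel definitions):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Stand-in for struct v4l2_tuner: only the leading "index" field matters here. */
struct tuner_like {
	unsigned int index;
	char name[32];
	unsigned int type;
};

/* Mirrors INFO_FL_CLEAR()/INFO_FL_CLEAR_MASK: pack "bytes of input" into bits 16+. */
#define FL_CLEAR(type, field) \
	((unsigned int)(offsetof(struct type, field) + \
			sizeof(((struct type *)0)->field)) << 16)
#define FL_CLEAR_MASK (0x3fffU << 16)	/* assumed _IOC_SIZEMASK value */

int main(void)
{
	unsigned int flags = FL_CLEAR(tuner_like, index);
	size_t n = (flags & FL_CLEAR_MASK) >> 16;
	struct tuner_like arg;

	printf("copy %zu input byte(s) from userspace\n", n);	/* 4: just "index" */

	/* ...then zero the remainder, as video_get_user() does with parg. */
	memset((unsigned char *)&arg + n, 0, sizeof(arg) - n);
	return 0;
}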
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_ESP_H
#define _NET_ESP_H

#include <linux/skbuff.h>

struct ip_esp_hdr;
struct xfrm_state;

static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
{
	return (struct ip_esp_hdr *)skb_transport_header(skb);
}

static inline void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

struct esp_info {
	struct ip_esp_hdr *esph;
	__be64 seqno;
	int tfclen;
	int tailen;
	int plen;
	int clen;
	int len;
	int nfrags;
	__u8 proto;
	bool inplace;
};

int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
int esp_input_done2(struct sk_buff *skb, int err);
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
int esp6_input_done2(struct sk_buff *skb, int err);

#endif
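For context on esp_output_fill_trailer() above: it writes the ESP trailer as optional TFC padding, self-describing pad bytes 1, 2, 3, ..., a pad-length byte, and the next-header value (the RFC 4303 layout). A small userspace sketch of the resulting bytes, with arbitrary example values:

#include <stdio.h>
#include <string.h>

/* Same logic as esp_output_fill_trailer(), minus the kernel types. */
static void fill_trailer(unsigned char *tail, int tfclen, int plen, unsigned char proto)
{
	int i;

	if (tfclen) {
		memset(tail, 0, tfclen);	/* TFC padding, if requested */
		tail += tfclen;
	}
	for (i = 0; i < plen - 2; i++)
		tail[i] = i + 1;		/* self-describing padding: 1, 2, 3, ... */
	tail[plen - 2] = plen - 2;		/* pad length */
	tail[plen - 1] = proto;			/* next header, e.g. 6 for TCP */
}

int main(void)
{
	unsigned char buf[8];
	int i;

	fill_trailer(buf, 0, 6, 6);		/* 4 pad bytes + pad length + next header */
	for (i = 0; i < 6; i++)
		printf("%02x ", buf[i]);
	printf("\n");				/* prints: 01 02 03 04 04 06 */
	return 0;
}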
/* * Copyright (C) 2016 Samsung Electronics Co.Ltd * Authors: * Marek Szyprowski <m.szyprowski@samsung.com> * * DRM core plane blending related functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty.
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <linux/export.h> #include <linux/slab.h> #include <linux/sort.h> #include <drm/drm_atomic.h> #include <drm/drm_blend.h> #include <drm/drm_device.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" /** * DOC: overview * * The basic plane composition model supported by standard plane properties only * has a source rectangle (in logical pixels within the &drm_framebuffer), with * sub-pixel accuracy, which is scaled up to a pixel-aligned destination * rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is * defined by the horizontal and vertical visible pixels (stored in @hdisplay * and @vdisplay) of the requested mode (stored in &drm_crtc_state.mode). These * two rectangles are both stored in the &drm_plane_state. * * For the atomic ioctl the following standard (atomic) properties on the plane object * encode the basic plane composition model: * * SRC_X: * X coordinate offset for the source rectangle within the * &drm_framebuffer, in 16.16 fixed point. Must be positive. * SRC_Y: * Y coordinate offset for the source rectangle within the * &drm_framebuffer, in 16.16 fixed point. Must be positive. * SRC_W: * Width for the source rectangle within the &drm_framebuffer, in 16.16 * fixed point. SRC_X plus SRC_W must be within the width of the source * framebuffer. Must be positive. * SRC_H: * Height for the source rectangle within the &drm_framebuffer, in 16.16 * fixed point. SRC_Y plus SRC_H must be within the height of the source * framebuffer. Must be positive. * CRTC_X: * X coordinate offset for the destination rectangle. Can be negative. * CRTC_Y: * Y coordinate offset for the destination rectangle. Can be negative. * CRTC_W: * Width for the destination rectangle. CRTC_X plus CRTC_W can extend past * the currently visible horizontal area of the &drm_crtc. * CRTC_H: * Height for the destination rectangle. CRTC_Y plus CRTC_H can extend past * the currently visible vertical area of the &drm_crtc. * FB_ID: * Mode object ID of the &drm_framebuffer this plane should scan out. * CRTC_ID: * Mode object ID of the &drm_crtc this plane should be connected to. * * Note that the source rectangle must fully lie within the bounds of the * &drm_framebuffer. The destination rectangle can lie outside of the visible * area of the current mode of the CRTC. It must be appropriately clipped by the * driver, which can be done by calling drm_plane_helper_check_update(). Drivers * are also allowed to round the subpixel sampling positions appropriately, but * only to the next full pixel. No pixel outside of the source rectangle may * ever be sampled, which is important when applying more sophisticated * filtering than just a bilinear one when scaling. The filtering mode when * scaling is unspecified. * * On top of this basic transformation additional properties can be exposed by * the driver: * * alpha: * Alpha is setup with drm_plane_create_alpha_property(). It controls the * plane-wide opacity, from transparent (0) to opaque (0xffff). It can be * combined with pixel alpha. 
* The pixel values in the framebuffers are expected to not be * pre-multiplied by the global alpha associated to the plane. * * rotation: * Rotation is set up with drm_plane_create_rotation_property(). It adds a * rotation and reflection step between the source and destination rectangles. * Without this property the rectangle is only scaled, but not rotated or * reflected. * * Possbile values: * * "rotate-<degrees>": * Signals that a drm plane is rotated <degrees> degrees in counter * clockwise direction. * * "reflect-<axis>": * Signals that the contents of a drm plane is reflected along the * <axis> axis, in the same way as mirroring. * * reflect-x:: * * |o | | o| * | | -> | | * | v| |v | * * reflect-y:: * * |o | | ^| * | | -> | | * | v| |o | * * zpos: * Z position is set up with drm_plane_create_zpos_immutable_property() and * drm_plane_create_zpos_property(). It controls the visibility of overlapping * planes. Without this property the primary plane is always below the cursor * plane, and ordering between all other planes is undefined. The positive * Z axis points towards the user, i.e. planes with lower Z position values * are underneath planes with higher Z position values. Two planes with the * same Z position value have undefined ordering. Note that the Z position * value can also be immutable, to inform userspace about the hard-coded * stacking of planes, see drm_plane_create_zpos_immutable_property(). If * any plane has a zpos property (either mutable or immutable), then all * planes shall have a zpos property. * * pixel blend mode: * Pixel blend mode is set up with drm_plane_create_blend_mode_property(). * It adds a blend mode for alpha blending equation selection, describing * how the pixels from the current plane are composited with the * background. * * Three alpha blending equations are defined: * * "None": * Blend formula that ignores the pixel alpha:: * * out.rgb = plane_alpha * fg.rgb + * (1 - plane_alpha) * bg.rgb * * "Pre-multiplied": * Blend formula that assumes the pixel color values * have been already pre-multiplied with the alpha * channel values:: * * out.rgb = plane_alpha * fg.rgb + * (1 - (plane_alpha * fg.alpha)) * bg.rgb * * "Coverage": * Blend formula that assumes the pixel color values have not * been pre-multiplied and will do so when blending them to the * background color values:: * * out.rgb = plane_alpha * fg.alpha * fg.rgb + * (1 - (plane_alpha * fg.alpha)) * bg.rgb * * Using the following symbols: * * "fg.rgb": * Each of the RGB component values from the plane's pixel * "fg.alpha": * Alpha component value from the plane's pixel. If the plane's * pixel format has no alpha component, then this is assumed to be * 1.0. In these cases, this property has no effect, as all three * equations become equivalent. * "bg.rgb": * Each of the RGB component values from the background * "plane_alpha": * Plane alpha value set by the plane "alpha" property. If the * plane does not expose the "alpha" property, then this is * assumed to be 1.0 * * Note that all the property extensions described here apply either to the * plane or the CRTC (e.g. for the background color, which currently is not * exposed and assumed to be black). 
* * SCALING_FILTER: * Indicates scaling filter to be used for plane scaler * * The value of this property can be one of the following: * * Default: * Driver's default scaling filter * Nearest Neighbor: * Nearest Neighbor scaling filter * * Drivers can set up this property for a plane by calling * drm_plane_create_scaling_filter_property */ /** * drm_plane_create_alpha_property - create a new alpha property * @plane: drm plane * * This function creates a generic, mutable, alpha property and enables support * for it in the DRM core. It is attached to @plane. * * The alpha property will be allowed to be within the bounds of 0 * (transparent) to 0xffff (opaque). * * Returns: * 0 on success, negative error code on failure. */ int drm_plane_create_alpha_property(struct drm_plane *plane) { struct drm_property *prop; prop = drm_property_create_range(plane->dev, 0, "alpha", 0, DRM_BLEND_ALPHA_OPAQUE); if (!prop) return -ENOMEM; drm_object_attach_property(&plane->base, prop, DRM_BLEND_ALPHA_OPAQUE); plane->alpha_property = prop; if (plane->state) plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE; return 0; } EXPORT_SYMBOL(drm_plane_create_alpha_property); /** * drm_plane_create_rotation_property - create a new rotation property * @plane: drm plane * @rotation: initial value of the rotation property * @supported_rotations: bitmask of supported rotations and reflections * * This creates a new property with the selected support for transformations. * * Since a rotation by 180° degress is the same as reflecting both along the x * and the y axis the rotation property is somewhat redundant. Drivers can use * drm_rotation_simplify() to normalize values of this property. * * The property exposed to userspace is a bitmask property (see * drm_property_create_bitmask()) called "rotation" and has the following * bitmask enumaration values: * * DRM_MODE_ROTATE_0: * "rotate-0" * DRM_MODE_ROTATE_90: * "rotate-90" * DRM_MODE_ROTATE_180: * "rotate-180" * DRM_MODE_ROTATE_270: * "rotate-270" * DRM_MODE_REFLECT_X: * "reflect-x" * DRM_MODE_REFLECT_Y: * "reflect-y" * * Rotation is the specified amount in degrees in counter clockwise direction, * the X and Y axis are within the source rectangle, i.e. the X/Y axis before * rotation. After reflection, the rotation is applied to the image sampled from * the source rectangle, before scaling it to fit the destination rectangle. 
*/ int drm_plane_create_rotation_property(struct drm_plane *plane, unsigned int rotation, unsigned int supported_rotations) { static const struct drm_prop_enum_list props[] = { { __builtin_ffs(DRM_MODE_ROTATE_0) - 1, "rotate-0" }, { __builtin_ffs(DRM_MODE_ROTATE_90) - 1, "rotate-90" }, { __builtin_ffs(DRM_MODE_ROTATE_180) - 1, "rotate-180" }, { __builtin_ffs(DRM_MODE_ROTATE_270) - 1, "rotate-270" }, { __builtin_ffs(DRM_MODE_REFLECT_X) - 1, "reflect-x" }, { __builtin_ffs(DRM_MODE_REFLECT_Y) - 1, "reflect-y" }, }; struct drm_property *prop; WARN_ON((supported_rotations & DRM_MODE_ROTATE_MASK) == 0); WARN_ON(!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK)); WARN_ON(rotation & ~supported_rotations); prop = drm_property_create_bitmask(plane->dev, 0, "rotation", props, ARRAY_SIZE(props), supported_rotations); if (!prop) return -ENOMEM; drm_object_attach_property(&plane->base, prop, rotation); if (plane->state) plane->state->rotation = rotation; plane->rotation_property = prop; return 0; } EXPORT_SYMBOL(drm_plane_create_rotation_property); /** * drm_rotation_simplify() - Try to simplify the rotation * @rotation: Rotation to be simplified * @supported_rotations: Supported rotations * * Attempt to simplify the rotation to a form that is supported. * Eg. if the hardware supports everything except DRM_MODE_REFLECT_X * one could call this function like this: * * drm_rotation_simplify(rotation, DRM_MODE_ROTATE_0 | * DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | * DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_Y); * * to eliminate the DRM_MODE_REFLECT_X flag. Depending on what kind of * transforms the hardware supports, this function may not * be able to produce a supported transform, so the caller should * check the result afterwards. */ unsigned int drm_rotation_simplify(unsigned int rotation, unsigned int supported_rotations) { if (rotation & ~supported_rotations) { rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; rotation = (rotation & DRM_MODE_REFLECT_MASK) | BIT((ffs(rotation & DRM_MODE_ROTATE_MASK) + 1) % 4); } return rotation; } EXPORT_SYMBOL(drm_rotation_simplify); /** * drm_plane_create_zpos_property - create mutable zpos property * @plane: drm plane * @zpos: initial value of zpos property * @min: minimal possible value of zpos property * @max: maximal possible value of zpos property * * This function initializes generic mutable zpos property and enables support * for it in drm core. Drivers can then attach this property to planes to enable * support for configurable planes arrangement during blending operation. * Drivers that attach a mutable zpos property to any plane should call the * drm_atomic_normalize_zpos() helper during their implementation of * &drm_mode_config_funcs.atomic_check(), which will update the normalized zpos * values and store them in &drm_plane_state.normalized_zpos. Usually min * should be set to 0 and max to maximal number of planes for given crtc - 1. * * If zpos of some planes cannot be changed (like fixed background or * cursor/topmost planes), drivers shall adjust the min/max values and assign * those planes immutable zpos properties with lower or higher values (for more * information, see drm_plane_create_zpos_immutable_property() function). In such * case drivers shall also assign proper initial zpos values for all planes in * its plane_reset() callback, so the planes will be always sorted properly. * * See also drm_atomic_normalize_zpos(). * * The property exposed to userspace is called "zpos". * * Returns: * Zero on success, negative errno on failure. 
*/ int drm_plane_create_zpos_property(struct drm_plane *plane, unsigned int zpos, unsigned int min, unsigned int max) { struct drm_property *prop; prop = drm_property_create_range(plane->dev, 0, "zpos", min, max); if (!prop) return -ENOMEM; drm_object_attach_property(&plane->base, prop, zpos); plane->zpos_property = prop; if (plane->state) { plane->state->zpos = zpos; plane->state->normalized_zpos = zpos; } return 0; } EXPORT_SYMBOL(drm_plane_create_zpos_property); /** * drm_plane_create_zpos_immutable_property - create immuttable zpos property * @plane: drm plane * @zpos: value of zpos property * * This function initializes generic immutable zpos property and enables * support for it in drm core. Using this property driver lets userspace * to get the arrangement of the planes for blending operation and notifies * it that the hardware (or driver) doesn't support changing of the planes' * order. For mutable zpos see drm_plane_create_zpos_property(). * * The property exposed to userspace is called "zpos". * * Returns: * Zero on success, negative errno on failure. */ int drm_plane_create_zpos_immutable_property(struct drm_plane *plane, unsigned int zpos) { struct drm_property *prop; prop = drm_property_create_range(plane->dev, DRM_MODE_PROP_IMMUTABLE, "zpos", zpos, zpos); if (!prop) return -ENOMEM; drm_object_attach_property(&plane->base, prop, zpos); plane->zpos_property = prop; if (plane->state) { plane->state->zpos = zpos; plane->state->normalized_zpos = zpos; } return 0; } EXPORT_SYMBOL(drm_plane_create_zpos_immutable_property); static int drm_atomic_state_zpos_cmp(const void *a, const void *b) { const struct drm_plane_state *sa = *(struct drm_plane_state **)a; const struct drm_plane_state *sb = *(struct drm_plane_state **)b; if (sa->zpos != sb->zpos) return sa->zpos - sb->zpos; else return sa->plane->base.id - sb->plane->base.id; } static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { struct drm_atomic_state *state = crtc_state->state; struct drm_device *dev = crtc->dev; int total_planes = dev->mode_config.num_total_plane; struct drm_plane_state **states; struct drm_plane *plane; int i, n = 0; int ret = 0; drm_dbg_atomic(dev, "[CRTC:%d:%s] calculating normalized zpos values\n", crtc->base.id, crtc->name); states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); if (!states) return -ENOMEM; /* * Normalization process might create new states for planes which * normalized_zpos has to be recalculated. */ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { ret = PTR_ERR(plane_state); goto done; } states[n++] = plane_state; drm_dbg_atomic(dev, "[PLANE:%d:%s] processing zpos value %d\n", plane->base.id, plane->name, plane_state->zpos); } sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL); for (i = 0; i < n; i++) { plane = states[i]->plane; states[i]->normalized_zpos = i; drm_dbg_atomic(dev, "[PLANE:%d:%s] normalized zpos value %d\n", plane->base.id, plane->name, i); } crtc_state->zpos_changed = true; done: kfree(states); return ret; } /** * drm_atomic_normalize_zpos - calculate normalized zpos values for all crtcs * @dev: DRM device * @state: atomic state of DRM device * * This function calculates normalized zpos value for all modified planes in * the provided atomic state of DRM device. 
* * For every CRTC this function checks new states of all planes assigned to * it and calculates normalized zpos value for these planes. Planes are compared * first by their zpos values, then by plane id (if zpos is equal). The plane * with lowest zpos value is at the bottom. The &drm_plane_state.normalized_zpos * is then filled with unique values from 0 to number of active planes in crtc * minus one. * * RETURNS * Zero for success or -errno */ int drm_atomic_normalize_zpos(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; int i, ret = 0; for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { crtc = new_plane_state->crtc; if (!crtc) continue; if (old_plane_state->zpos != new_plane_state->zpos) { new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); new_crtc_state->zpos_changed = true; } } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (old_crtc_state->plane_mask != new_crtc_state->plane_mask || new_crtc_state->zpos_changed) { ret = drm_atomic_helper_crtc_normalize_zpos(crtc, new_crtc_state); if (ret) return ret; } } return 0; } EXPORT_SYMBOL(drm_atomic_normalize_zpos); /** * drm_plane_create_blend_mode_property - create a new blend mode property * @plane: drm plane * @supported_modes: bitmask of supported modes, must include * BIT(DRM_MODE_BLEND_PREMULTI). Current DRM assumption is * that alpha is premultiplied, and old userspace can break if * the property defaults to anything else. * * This creates a new property describing the blend mode. * * The property exposed to userspace is an enumeration property (see * drm_property_create_enum()) called "pixel blend mode" and has the * following enumeration values: * * "None": * Blend formula that ignores the pixel alpha. * * "Pre-multiplied": * Blend formula that assumes the pixel color values have been already * pre-multiplied with the alpha channel values. * * "Coverage": * Blend formula that assumes the pixel color values have not been * pre-multiplied and will do so when blending them to the background color * values. * * RETURNS: * Zero for success or -errno */ int drm_plane_create_blend_mode_property(struct drm_plane *plane, unsigned int supported_modes) { struct drm_device *dev = plane->dev; struct drm_property *prop; static const struct drm_prop_enum_list props[] = { { DRM_MODE_BLEND_PIXEL_NONE, "None" }, { DRM_MODE_BLEND_PREMULTI, "Pre-multiplied" }, { DRM_MODE_BLEND_COVERAGE, "Coverage" }, }; unsigned int valid_mode_mask = BIT(DRM_MODE_BLEND_PIXEL_NONE) | BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE); int i; if (WARN_ON((supported_modes & ~valid_mode_mask) || ((supported_modes & BIT(DRM_MODE_BLEND_PREMULTI)) == 0))) return -EINVAL; prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "pixel blend mode", hweight32(supported_modes)); if (!prop) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(props); i++) { int ret; if (!(BIT(props[i].type) & supported_modes)) continue; ret = drm_property_add_enum(prop, props[i].type, props[i].name); if (ret) { drm_property_destroy(dev, prop); return ret; } } drm_object_attach_property(&plane->base, prop, DRM_MODE_BLEND_PREMULTI); plane->blend_mode_property = prop; return 0; } EXPORT_SYMBOL(drm_plane_create_blend_mode_property);
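To make the three blend equations from the documentation above concrete, here is a standalone evaluation of one pixel with arbitrarily chosen values (fg.rgb = 0.8, fg.alpha = 0.5, bg.rgb = 0.2, plane alpha = 1.0); only the way fg.alpha enters the formula differs between the modes:

#include <stdio.h>

int main(void)
{
	double fg = 0.8, fg_alpha = 0.5, bg = 0.2, plane_alpha = 1.0;

	/* "None": pixel alpha is ignored. */
	double none = plane_alpha * fg + (1 - plane_alpha) * bg;
	/* "Pre-multiplied": fg.rgb is assumed to already contain fg.alpha. */
	double premult = plane_alpha * fg + (1 - plane_alpha * fg_alpha) * bg;
	/* "Coverage": fg.rgb is multiplied by fg.alpha during blending. */
	double coverage = plane_alpha * fg_alpha * fg +
			  (1 - plane_alpha * fg_alpha) * bg;

	/* prints: None: 0.80  Pre-multiplied: 0.90  Coverage: 0.50 */
	printf("None: %.2f  Pre-multiplied: %.2f  Coverage: %.2f\n",
	       none, premult, coverage);
	return 0;
}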
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ALSA timer back-end using hrtimer
 * Copyright (C) 2008 Takashi Iwai
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hrtimer.h>
#include <sound/core.h>
#include <sound/timer.h>

MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA hrtimer backend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_HRTIMER));

#define NANO_SEC 1000000000UL	/* 10^9 in sec */
static unsigned int resolution;

struct snd_hrtimer {
	struct snd_timer *timer;
	struct hrtimer hrt;
	bool in_callback;
};

static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
	struct snd_timer *t = stime->timer;
	ktime_t delta;
	unsigned long ticks;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	scoped_guard(spinlock, &t->lock) {
		if (!t->running)
			return HRTIMER_NORESTART; /* fast path */
		stime->in_callback = true;
		ticks = t->sticks;
	}

	/* calculate the drift */
	delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt));
	if (delta > 0)
		ticks += ktime_divns(delta, ticks * resolution);

	snd_timer_interrupt(stime->timer, ticks);

	guard(spinlock)(&t->lock);
	if (t->running) {
		hrtimer_add_expires_ns(hrt, t->sticks * resolution);
		ret = HRTIMER_RESTART;
	}

	stime->in_callback = false;
	return ret;
}

static int snd_hrtimer_open(struct snd_timer *t)
{
	struct snd_hrtimer *stime;

	stime = kzalloc(sizeof(*stime), GFP_KERNEL);
	if (!stime)
		return -ENOMEM;
	hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	stime->timer = t;
	stime->hrt.function = snd_hrtimer_callback;
	t->private_data = stime;
	return 0;
}

static int snd_hrtimer_close(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime) {
		scoped_guard(spinlock_irq, &t->lock) {
			t->running = 0; /* just to be sure */
			stime->in_callback = 1; /* skip start/stop */
		}
		hrtimer_cancel(&stime->hrt);
		kfree(stime);
		t->private_data = NULL;
	}
	return 0;
}

static int snd_hrtimer_start(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime->in_callback)
		return 0;
	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
		      HRTIMER_MODE_REL);
	return 0;
}

static int snd_hrtimer_stop(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime->in_callback)
		return 0;
	hrtimer_try_to_cancel(&stime->hrt);
	return 0;
}

static const struct snd_timer_hardware hrtimer_hw __initconst = {
	.flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_WORK,
	.open = snd_hrtimer_open,
	.close = snd_hrtimer_close,
	.start = snd_hrtimer_start,
	.stop = snd_hrtimer_stop,
};

/*
 * entry functions
 */

static struct snd_timer *mytimer;

static int __init snd_hrtimer_init(void)
{
	struct snd_timer *timer;
	int err;

	resolution = hrtimer_resolution;

	/* Create a new timer and set up the fields */
	err = snd_timer_global_new("hrtimer", SNDRV_TIMER_GLOBAL_HRTIMER, &timer);
	if (err < 0)
		return err;

	timer->module = THIS_MODULE;
	strcpy(timer->name, "HR timer");
	timer->hw = hrtimer_hw;
	timer->hw.resolution = resolution;
	timer->hw.ticks = NANO_SEC / resolution;
	timer->max_instances = 100; /* lower the limit */

	err = snd_timer_global_register(timer);
	if (err < 0) {
		snd_timer_global_free(timer);
		return err;
	}
	mytimer = timer; /* remember this */
	return 0;
}

static void __exit snd_hrtimer_exit(void)
{
	if (mytimer) {
		snd_timer_global_free(mytimer);
		mytimer = NULL;
	}
}

module_init(snd_hrtimer_init);
module_exit(snd_hrtimer_exit);
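A quick illustration of the drift handling in snd_hrtimer_callback() above, in plain userspace arithmetic: the lateness of the callback (delta) is divided by the period length in nanoseconds (sticks * resolution), and the result is added to the tick count passed to snd_timer_interrupt(). The numbers below are made up for the sketch (1 ns resolution, 1 ms period, callback firing 2.5 ms late):

#include <stdio.h>

int main(void)
{
	unsigned long resolution = 1;		/* ns per tick (assumed) */
	unsigned long sticks = 1000000;		/* ticks per period -> 1 ms */
	long long delta_ns = 2500000;		/* callback fired 2.5 ms late */
	unsigned long ticks = sticks;

	if (delta_ns > 0)
		ticks += delta_ns / (long long)(sticks * resolution);	/* += 2 */

	/* prints: report 1000002 ticks */
	printf("report %lu ticks\n", ticks);
	return 0;
}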
// SPDX-License-Identifier: GPL-2.0
#include <linux/kmod.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h> #include <linux/phylib_stubs.h> #include <linux/wireless.h> #include <linux/if_bridge.h> #include <net/dsa_stubs.h> #include <net/wext.h> #include "dev.h" /* * Map an interface index to its name (SIOCGIFNAME) */ /* * We need this ioctl for efficient implementation of the * if_indextoname() function required by the IPv6 API. Without * it, we would have to search all the interfaces to find a * match. --pb */ static int dev_ifname(struct net *net, struct ifreq *ifr) { ifr->ifr_name[IFNAMSIZ-1] = 0; return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex); } /* * Perform a SIOCGIFCONF call. This structure will change * size eventually, and there is nothing I can do about it. * Thus we will need a 'compatibility mode'. */ int dev_ifconf(struct net *net, struct ifconf __user *uifc) { struct net_device *dev; void __user *pos; size_t size; int len, total = 0, done; /* both the ifconf and the ifreq structures are slightly different */ if (in_compat_syscall()) { struct compat_ifconf ifc32; if (copy_from_user(&ifc32, uifc, sizeof(struct compat_ifconf))) return -EFAULT; pos = compat_ptr(ifc32.ifcbuf); len = ifc32.ifc_len; size = sizeof(struct compat_ifreq); } else { struct ifconf ifc; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; pos = ifc.ifc_buf; len = ifc.ifc_len; size = sizeof(struct ifreq); } /* Loop over the interfaces, and write an info block for each. */ rtnl_lock(); for_each_netdev(net, dev) { if (!pos) done = inet_gifconf(dev, NULL, 0, size); else done = inet_gifconf(dev, pos + total, len - total, size); if (done < 0) { rtnl_unlock(); return -EFAULT; } total += done; } rtnl_unlock(); return put_user(total, &uifc->ifc_len); } static int dev_getifmap(struct net_device *dev, struct ifreq *ifr) { struct ifmap *ifmap = &ifr->ifr_map; if (in_compat_syscall()) { struct compat_ifmap *cifmap = (struct compat_ifmap *)ifmap; cifmap->mem_start = dev->mem_start; cifmap->mem_end = dev->mem_end; cifmap->base_addr = dev->base_addr; cifmap->irq = dev->irq; cifmap->dma = dev->dma; cifmap->port = dev->if_port; return 0; } ifmap->mem_start = dev->mem_start; ifmap->mem_end = dev->mem_end; ifmap->base_addr = dev->base_addr; ifmap->irq = dev->irq; ifmap->dma = dev->dma; ifmap->port = dev->if_port; return 0; } static int dev_setifmap(struct net_device *dev, struct ifreq *ifr) { struct compat_ifmap *cifmap = (struct compat_ifmap *)&ifr->ifr_map; if (!dev->netdev_ops->ndo_set_config) return -EOPNOTSUPP; if (in_compat_syscall()) { struct ifmap ifmap = { .mem_start = cifmap->mem_start, .mem_end = cifmap->mem_end, .base_addr = cifmap->base_addr, .irq = cifmap->irq, .dma = cifmap->dma, .port = cifmap->port, }; return dev->netdev_ops->ndo_set_config(dev, &ifmap); } return dev->netdev_ops->ndo_set_config(dev, &ifr->ifr_map); } /* * Perform the SIOCxIFxxx calls, inside rcu_read_lock() */ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) { int err; struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name); if (!dev) return -ENODEV; switch (cmd) { case SIOCGIFFLAGS: /* Get interface flags */ ifr->ifr_flags = (short) dev_get_flags(dev); return 0; case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */ ifr->ifr_metric = 0; return 0; case SIOCGIFMTU: /* Get the MTU of a device */ ifr->ifr_mtu = dev->mtu; return 0; case SIOCGIFSLAVE: err = -EINVAL; break; case SIOCGIFMAP: return dev_getifmap(dev, ifr); case SIOCGIFINDEX: ifr->ifr_ifindex = dev->ifindex; return 0; case SIOCGIFTXQLEN: ifr->ifr_qlen = 
dev->tx_queue_len; return 0; default: /* dev_ioctl() should ensure this case * is never reached */ WARN_ON(1); err = -ENOTTY; break; } return err; } static int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg) { enum hwtstamp_tx_types tx_type; enum hwtstamp_rx_filters rx_filter; int tx_type_valid = 0; int rx_filter_valid = 0; if (cfg->flags & ~HWTSTAMP_FLAG_MASK) return -EINVAL; tx_type = cfg->tx_type; rx_filter = cfg->rx_filter; switch (tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: case HWTSTAMP_TX_ONESTEP_SYNC: case HWTSTAMP_TX_ONESTEP_P2P: tx_type_valid = 1; break; case __HWTSTAMP_TX_CNT: /* not a real value */ break; } switch (rx_filter) { case HWTSTAMP_FILTER_NONE: case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: rx_filter_valid = 1; break; case __HWTSTAMP_FILTER_CNT: /* not a real value */ break; } if (!tx_type_valid || !rx_filter_valid) return -ERANGE; return 0; } static int dev_eth_ioctl(struct net_device *dev, struct ifreq *ifr, unsigned int cmd) { const struct net_device_ops *ops = dev->netdev_ops; if (!ops->ndo_eth_ioctl) return -EOPNOTSUPP; if (!netif_device_present(dev)) return -ENODEV; return ops->ndo_eth_ioctl(dev, ifr, cmd); } /** * dev_get_hwtstamp_phylib() - Get hardware timestamping settings of NIC * or of attached phylib PHY * @dev: Network device * @cfg: Timestamping configuration structure * * Helper for calling the default hardware provider timestamping. * * Note: phy_mii_ioctl() only handles SIOCSHWTSTAMP (not SIOCGHWTSTAMP), and * there only exists a phydev->mii_ts->hwtstamp() method. So this will return * -EOPNOTSUPP for phylib for now, which is still more accurate than letting * the netdev handle the GET request. 
*/ static int dev_get_hwtstamp_phylib(struct net_device *dev, struct kernel_hwtstamp_config *cfg) { if (phy_is_default_hwtstamp(dev->phydev)) return phy_hwtstamp_get(dev->phydev, cfg); return dev->netdev_ops->ndo_hwtstamp_get(dev, cfg); } static int dev_get_hwtstamp(struct net_device *dev, struct ifreq *ifr) { const struct net_device_ops *ops = dev->netdev_ops; struct kernel_hwtstamp_config kernel_cfg = {}; struct hwtstamp_config cfg; int err; if (!ops->ndo_hwtstamp_get) return dev_eth_ioctl(dev, ifr, SIOCGHWTSTAMP); /* legacy */ if (!netif_device_present(dev)) return -ENODEV; kernel_cfg.ifr = ifr; err = dev_get_hwtstamp_phylib(dev, &kernel_cfg); if (err) return err; /* If the request was resolved through an unconverted driver, omit * the copy_to_user(), since the implementation has already done that */ if (!kernel_cfg.copied_to_user) { hwtstamp_config_from_kernel(&cfg, &kernel_cfg); if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) return -EFAULT; } return 0; } /** * dev_set_hwtstamp_phylib() - Change hardware timestamping of NIC * or of attached phylib PHY * @dev: Network device * @cfg: Timestamping configuration structure * @extack: Netlink extended ack message structure, for error reporting * * Helper for enforcing a common policy that phylib timestamping, if available, * should take precedence in front of hardware timestamping provided by the * netdev. If the netdev driver needs to perform specific actions even for PHY * timestamping to work properly (a switch port must trap the timestamped * frames and not forward them), it must set IFF_SEE_ALL_HWTSTAMP_REQUESTS in * dev->priv_flags. */ int dev_set_hwtstamp_phylib(struct net_device *dev, struct kernel_hwtstamp_config *cfg, struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; bool phy_ts = phy_is_default_hwtstamp(dev->phydev); struct kernel_hwtstamp_config old_cfg = {}; bool changed = false; int err; cfg->source = phy_ts ? 
HWTSTAMP_SOURCE_PHYLIB : HWTSTAMP_SOURCE_NETDEV; if (phy_ts && (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS)) { err = ops->ndo_hwtstamp_get(dev, &old_cfg); if (err) return err; } if (!phy_ts || (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS)) { err = ops->ndo_hwtstamp_set(dev, cfg, extack); if (err) { if (extack->_msg) netdev_err(dev, "%s\n", extack->_msg); return err; } } if (phy_ts && (dev->priv_flags & IFF_SEE_ALL_HWTSTAMP_REQUESTS)) changed = kernel_hwtstamp_config_changed(&old_cfg, cfg); if (phy_ts) { err = phy_hwtstamp_set(dev->phydev, cfg, extack); if (err) { if (changed) ops->ndo_hwtstamp_set(dev, &old_cfg, NULL); return err; } } return 0; } static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) { const struct net_device_ops *ops = dev->netdev_ops; struct kernel_hwtstamp_config kernel_cfg = {}; struct netlink_ext_ack extack = {}; struct hwtstamp_config cfg; int err; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; hwtstamp_config_to_kernel(&kernel_cfg, &cfg); kernel_cfg.ifr = ifr; err = net_hwtstamp_validate(&kernel_cfg); if (err) return err; err = dsa_conduit_hwtstamp_validate(dev, &kernel_cfg, &extack); if (err) { if (extack._msg) netdev_err(dev, "%s\n", extack._msg); return err; } if (!ops->ndo_hwtstamp_set) return dev_eth_ioctl(dev, ifr, SIOCSHWTSTAMP); /* legacy */ if (!netif_device_present(dev)) return -ENODEV; err = dev_set_hwtstamp_phylib(dev, &kernel_cfg, &extack); if (err) return err; /* The driver may have modified the configuration, so copy the * updated version of it back to user space */ if (!kernel_cfg.copied_to_user) { hwtstamp_config_from_kernel(&cfg, &kernel_cfg); if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) return -EFAULT; } return 0; } static int generic_hwtstamp_ioctl_lower(struct net_device *dev, int cmd, struct kernel_hwtstamp_config *kernel_cfg) { struct ifreq ifrr; int err; strscpy_pad(ifrr.ifr_name, dev->name, IFNAMSIZ); ifrr.ifr_ifru = kernel_cfg->ifr->ifr_ifru; err = dev_eth_ioctl(dev, &ifrr, cmd); if (err) return err; kernel_cfg->ifr->ifr_ifru = ifrr.ifr_ifru; kernel_cfg->copied_to_user = true; return 0; } int generic_hwtstamp_get_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg) { const struct net_device_ops *ops = dev->netdev_ops; if (!netif_device_present(dev)) return -ENODEV; if (ops->ndo_hwtstamp_get) return dev_get_hwtstamp_phylib(dev, kernel_cfg); /* Legacy path: unconverted lower driver */ return generic_hwtstamp_ioctl_lower(dev, SIOCGHWTSTAMP, kernel_cfg); } EXPORT_SYMBOL(generic_hwtstamp_get_lower); int generic_hwtstamp_set_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg, struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; if (!netif_device_present(dev)) return -ENODEV; if (ops->ndo_hwtstamp_set) return dev_set_hwtstamp_phylib(dev, kernel_cfg, extack); /* Legacy path: unconverted lower driver */ return generic_hwtstamp_ioctl_lower(dev, SIOCSHWTSTAMP, kernel_cfg); } EXPORT_SYMBOL(generic_hwtstamp_set_lower); static int dev_siocbond(struct net_device *dev, struct ifreq *ifr, unsigned int cmd) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocbond) { if (netif_device_present(dev)) return ops->ndo_siocbond(dev, ifr, cmd); else return -ENODEV; } return -EOPNOTSUPP; } static int dev_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, unsigned int cmd) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocdevprivate) { if (netif_device_present(dev)) return 
ops->ndo_siocdevprivate(dev, ifr, data, cmd); else return -ENODEV; } return -EOPNOTSUPP; } static int dev_siocwandev(struct net_device *dev, struct if_settings *ifs) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocwandev) { if (netif_device_present(dev)) return ops->ndo_siocwandev(dev, ifs); else return -ENODEV; } return -EOPNOTSUPP; } /* * Perform the SIOCxIFxxx calls, inside rtnl_lock() */ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, unsigned int cmd) { int err; struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); const struct net_device_ops *ops; netdevice_tracker dev_tracker; if (!dev) return -ENODEV; ops = dev->netdev_ops; switch (cmd) { case SIOCSIFFLAGS: /* Set interface flags */ return dev_change_flags(dev, ifr->ifr_flags, NULL); case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */ return -EOPNOTSUPP; case SIOCSIFMTU: /* Set the MTU of a device */ return dev_set_mtu(dev, ifr->ifr_mtu); case SIOCSIFHWADDR: if (dev->addr_len > sizeof(struct sockaddr)) return -EINVAL; return dev_set_mac_address_user(dev, &ifr->ifr_hwaddr, NULL); case SIOCSIFHWBROADCAST: if (ifr->ifr_hwaddr.sa_family != dev->type) return -EINVAL; memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, min(sizeof(ifr->ifr_hwaddr.sa_data_min), (size_t)dev->addr_len)); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return 0; case SIOCSIFMAP: return dev_setifmap(dev, ifr); case SIOCADDMULTI: if (!ops->ndo_set_rx_mode || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCDELMULTI: if (!ops->ndo_set_rx_mode || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCSIFTXQLEN: if (ifr->ifr_qlen < 0) return -EINVAL; return dev_change_tx_queue_len(dev, ifr->ifr_qlen); case SIOCSIFNAME: ifr->ifr_newname[IFNAMSIZ-1] = '\0'; return dev_change_name(dev, ifr->ifr_newname); case SIOCWANDEV: return dev_siocwandev(dev, &ifr->ifr_settings); case SIOCBRADDIF: case SIOCBRDELIF: if (!netif_device_present(dev)) return -ENODEV; if (!netif_is_bridge_master(dev)) return -EOPNOTSUPP; netdev_hold(dev, &dev_tracker, GFP_KERNEL); rtnl_unlock(); err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL); netdev_put(dev, &dev_tracker); rtnl_lock(); return err; case SIOCDEVPRIVATE ... SIOCDEVPRIVATE + 15: return dev_siocdevprivate(dev, ifr, data, cmd); case SIOCSHWTSTAMP: return dev_set_hwtstamp(dev, ifr); case SIOCGHWTSTAMP: return dev_get_hwtstamp(dev, ifr); case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_eth_ioctl(dev, ifr, cmd); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return dev_siocbond(dev, ifr, cmd); /* Unknown ioctl */ default: err = -EINVAL; } return err; } /** * dev_load - load a network module * @net: the applicable net namespace * @name: name of interface * * If a network interface is not present and the process has suitable * privileges this function loads the module. If module loading is not * available in this kernel then it becomes a nop. 
*/ void dev_load(struct net *net, const char *name) { struct net_device *dev; int no_module; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); rcu_read_unlock(); no_module = !dev; if (no_module && capable(CAP_NET_ADMIN)) no_module = request_module("netdev-%s", name); if (no_module && capable(CAP_SYS_MODULE)) request_module("%s", name); } EXPORT_SYMBOL(dev_load); /* * This function handles all "interface"-type I/O control requests. The actual * 'doing' part of this is dev_ifsioc above. */ /** * dev_ioctl - network device ioctl * @net: the applicable net namespace * @cmd: command to issue * @ifr: pointer to a struct ifreq in user space * @data: data exchanged with userspace * @need_copyout: whether or not copy_to_user() should be called * * Issue ioctl functions to devices. This is normally called by the * user space syscall interfaces but can sometimes be useful for * other purposes. The return value is the return from the syscall if * positive or a negative errno code on error. */ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, void __user *data, bool *need_copyout) { int ret; char *colon; if (need_copyout) *need_copyout = true; if (cmd == SIOCGIFNAME) return dev_ifname(net, ifr); ifr->ifr_name[IFNAMSIZ-1] = 0; colon = strchr(ifr->ifr_name, ':'); if (colon) *colon = 0; /* * See which interface the caller is talking about. */ switch (cmd) { case SIOCGIFHWADDR: dev_load(net, ifr->ifr_name); ret = dev_get_mac_address(&ifr->ifr_hwaddr, net, ifr->ifr_name); if (colon) *colon = ':'; return ret; /* * These ioctl calls: * - can be done by all. * - atomic and do not require locking. * - return a value */ case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFSLAVE: case SIOCGIFMAP: case SIOCGIFINDEX: case SIOCGIFTXQLEN: dev_load(net, ifr->ifr_name); rcu_read_lock(); ret = dev_ifsioc_locked(net, ifr, cmd); rcu_read_unlock(); if (colon) *colon = ':'; return ret; case SIOCETHTOOL: dev_load(net, ifr->ifr_name); ret = dev_ethtool(net, ifr, data); if (colon) *colon = ':'; return ret; /* * These ioctl calls: * - require superuser power. * - require strict serialization. * - return a value */ case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSIFNAME: dev_load(net, ifr->ifr_name); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; rtnl_lock(); ret = dev_ifsioc(net, ifr, data, cmd); rtnl_unlock(); if (colon) *colon = ':'; return ret; /* * These ioctl calls: * - require superuser power. * - require strict serialization. * - do not return a value */ case SIOCSIFMAP: case SIOCSIFTXQLEN: if (!capable(CAP_NET_ADMIN)) return -EPERM; fallthrough; /* * These ioctl calls: * - require local superuser power. * - require strict serialization. * - do not return a value */ case SIOCSIFFLAGS: case SIOCSIFMETRIC: case SIOCSIFMTU: case SIOCSIFHWADDR: case SIOCSIFSLAVE: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCSIFHWBROADCAST: case SIOCSMIIREG: case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSHWTSTAMP: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; fallthrough; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: dev_load(net, ifr->ifr_name); rtnl_lock(); ret = dev_ifsioc(net, ifr, data, cmd); rtnl_unlock(); if (need_copyout) *need_copyout = false; return ret; case SIOCGIFMEM: /* Get the per device memory space. We can add this but * currently do not support it */ case SIOCSIFMEM: /* Set the per device memory buffer space. 
* Not applicable in our case */ case SIOCSIFLINK: return -ENOTTY; /* * Unknown or private ioctl. */ default: if (cmd == SIOCWANDEV || cmd == SIOCGHWTSTAMP || (cmd >= SIOCDEVPRIVATE && cmd <= SIOCDEVPRIVATE + 15)) { dev_load(net, ifr->ifr_name); rtnl_lock(); ret = dev_ifsioc(net, ifr, data, cmd); rtnl_unlock(); return ret; } return -ENOTTY; } }
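/*
 * A minimal userspace sketch (illustration only) of how the hardware
 * timestamping paths above are typically reached: a struct
 * hwtstamp_config travels through ifr_data, dev_ioctl() dispatches
 * SIOCSHWTSTAMP to dev_ifsioc() -> dev_set_hwtstamp(), and the kernel
 * may write an adjusted configuration back.  The interface name "eth0"
 * is only a placeholder.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_ALL,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        /* ends up in dev_set_hwtstamp(); cfg is copied back on success */
        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                printf("accepted: tx_type=%d rx_filter=%d\n",
                       cfg.tx_type, cfg.rx_filter);
        close(fd);
        return 0;
}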
// SPDX-License-Identifier: GPL-2.0-or-later /* * ip_vs_ftp.c: IPVS ftp application module * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * Changes: * * Most code here is taken from ip_masq_ftp.c in kernel 2.2. The difference * is that ip_vs_ftp module handles the reverse direction to ip_masq_ftp.
* * IP_MASQ_FTP ftp masquerading module * * Version: @(#)ip_masq_ftp.c 0.04 02/05/96 * * Author: Wouter Gadeyne */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <linux/gfp.h> #include <net/protocol.h> #include <net/tcp.h> #include <asm/unaligned.h> #include <net/ip_vs.h> #define SERVER_STRING_PASV "227 " #define CLIENT_STRING_PORT "PORT" #define SERVER_STRING_EPSV "229 " #define CLIENT_STRING_EPRT "EPRT" enum { IP_VS_FTP_ACTIVE = 0, IP_VS_FTP_PORT = 0, IP_VS_FTP_PASV, IP_VS_FTP_EPRT, IP_VS_FTP_EPSV, }; /* * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper * First port is set to the default port. */ static unsigned int ports_count = 1; static unsigned short ports[IP_VS_APP_MAX_PORTS] = {21, 0}; module_param_array(ports, ushort, &ports_count, 0444); MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands"); static char *ip_vs_ftp_data_ptr(struct sk_buff *skb, struct ip_vs_iphdr *ipvsh) { struct tcphdr *th = (struct tcphdr *)((char *)skb->data + ipvsh->len); if ((th->doff << 2) < sizeof(struct tcphdr)) return NULL; return (char *)th + (th->doff << 2); } static int ip_vs_ftp_init_conn(struct ip_vs_app *app, struct ip_vs_conn *cp) { /* We use connection tracking for the command connection */ cp->flags |= IP_VS_CONN_F_NFCT; return 0; } static int ip_vs_ftp_done_conn(struct ip_vs_app *app, struct ip_vs_conn *cp) { return 0; } /* Get <addr,port> from the string "xxx.xxx.xxx.xxx,ppp,ppp", started * with the "pattern". <addr,port> is in network order. * Parse extended format depending on ext. In this case addr can be pre-set. */ static int ip_vs_ftp_get_addrport(char *data, char *data_limit, const char *pattern, size_t plen, char skip, bool ext, int mode, union nf_inet_addr *addr, __be16 *port, __u16 af, char **start, char **end) { char *s, c; unsigned char p[6]; char edelim; __u16 hport; int i = 0; if (data_limit - data < plen) { /* check if there is partial match */ if (strncasecmp(data, pattern, data_limit - data) == 0) return -1; else return 0; } if (strncasecmp(data, pattern, plen) != 0) { return 0; } s = data + plen; if (skip) { bool found = false; for (;; s++) { if (s == data_limit) return -1; if (!found) { /* "(" is optional for non-extended format, * so catch the start of IPv4 address */ if (!ext && isdigit(*s)) break; if (*s == skip) found = true; } else if (*s != skip) { break; } } } /* Old IPv4-only format? 
*/ if (!ext) { p[0] = 0; for (data = s; ; data++) { if (data == data_limit) return -1; c = *data; if (isdigit(c)) { p[i] = p[i]*10 + c - '0'; } else if (c == ',' && i < 5) { i++; p[i] = 0; } else { /* unexpected character or terminator */ break; } } if (i != 5) return -1; *start = s; *end = data; addr->ip = get_unaligned((__be32 *) p); *port = get_unaligned((__be16 *) (p + 4)); return 1; } if (s == data_limit) return -1; *start = s; edelim = *s++; if (edelim < 33 || edelim > 126) return -1; if (s == data_limit) return -1; if (*s == edelim) { /* Address family is usually missing for EPSV response */ if (mode != IP_VS_FTP_EPSV) return -1; s++; if (s == data_limit) return -1; /* Then address should be missing too */ if (*s != edelim) return -1; /* Caller can pre-set addr, if needed */ s++; } else { const char *ep; /* We allow address only from same family */ if (af == AF_INET6 && *s != '2') return -1; if (af == AF_INET && *s != '1') return -1; s++; if (s == data_limit) return -1; if (*s != edelim) return -1; s++; if (s == data_limit) return -1; if (af == AF_INET6) { if (in6_pton(s, data_limit - s, (u8 *)addr, edelim, &ep) <= 0) return -1; } else { if (in4_pton(s, data_limit - s, (u8 *)addr, edelim, &ep) <= 0) return -1; } s = (char *) ep; if (s == data_limit) return -1; if (*s != edelim) return -1; s++; } for (hport = 0; ; s++) { if (s == data_limit) return -1; if (!isdigit(*s)) break; hport = hport * 10 + *s - '0'; } if (s == data_limit || !hport || *s != edelim) return -1; s++; *end = s; *port = htons(hport); return 1; } /* Look at outgoing ftp packets to catch the response to a PASV/EPSV command * from the server (inside-to-outside). * When we see one, we build a connection entry with the client address, * client port 0 (unknown at the moment), the server address and the * server port. Mark the current connection entry as a control channel * of the new entry. All this work is just to make the data connection * can be scheduled to the right server later. * * The outgoing packet should be something like * "227 Entering Passive Mode (xxx,xxx,xxx,xxx,ppp,ppp)". * xxx,xxx,xxx,xxx is the server address, ppp,ppp is the server port number. * The extended format for EPSV response provides usually only port: * "229 Entering Extended Passive Mode (|||ppp|)" */ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, struct sk_buff *skb, int *diff, struct ip_vs_iphdr *ipvsh) { char *data, *data_limit; char *start, *end; union nf_inet_addr from; __be16 port; struct ip_vs_conn *n_cp; char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ unsigned int buf_len; int ret = 0; enum ip_conntrack_info ctinfo; struct nf_conn *ct; *diff = 0; /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; /* Linear packets are much easier to deal with. 
*/ if (skb_ensure_writable(skb, skb->len)) return 0; if (cp->app_data == (void *) IP_VS_FTP_PASV) { data = ip_vs_ftp_data_ptr(skb, ipvsh); data_limit = skb_tail_pointer(skb); if (!data || data >= data_limit) return 1; if (ip_vs_ftp_get_addrport(data, data_limit, SERVER_STRING_PASV, sizeof(SERVER_STRING_PASV)-1, '(', false, IP_VS_FTP_PASV, &from, &port, cp->af, &start, &end) != 1) return 1; IP_VS_DBG(7, "PASV response (%pI4:%u) -> %pI4:%u detected\n", &from.ip, ntohs(port), &cp->caddr.ip, 0); } else if (cp->app_data == (void *) IP_VS_FTP_EPSV) { data = ip_vs_ftp_data_ptr(skb, ipvsh); data_limit = skb_tail_pointer(skb); if (!data || data >= data_limit) return 1; /* Usually, data address is not specified but * we support different address, so pre-set it. */ from = cp->daddr; if (ip_vs_ftp_get_addrport(data, data_limit, SERVER_STRING_EPSV, sizeof(SERVER_STRING_EPSV)-1, '(', true, IP_VS_FTP_EPSV, &from, &port, cp->af, &start, &end) != 1) return 1; IP_VS_DBG_BUF(7, "EPSV response (%s:%u) -> %s:%u detected\n", IP_VS_DBG_ADDR(cp->af, &from), ntohs(port), IP_VS_DBG_ADDR(cp->af, &cp->caddr), 0); } else { return 1; } /* Now update or create a connection entry for it */ { struct ip_vs_conn_param p; ip_vs_conn_fill_param(cp->ipvs, cp->af, ipvsh->protocol, &from, port, &cp->caddr, 0, &p); n_cp = ip_vs_conn_out_get(&p); } if (!n_cp) { struct ip_vs_conn_param p; ip_vs_conn_fill_param(cp->ipvs, cp->af, ipvsh->protocol, &cp->caddr, 0, &cp->vaddr, port, &p); n_cp = ip_vs_conn_new(&p, cp->af, &from, port, IP_VS_CONN_F_NO_CPORT | IP_VS_CONN_F_NFCT, cp->dest, skb->mark); if (!n_cp) return 0; /* add its controller */ ip_vs_control_add(n_cp, cp); } /* Replace the old passive address with the new one */ if (cp->app_data == (void *) IP_VS_FTP_PASV) { from.ip = n_cp->vaddr.ip; port = n_cp->vport; snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u", ((unsigned char *)&from.ip)[0], ((unsigned char *)&from.ip)[1], ((unsigned char *)&from.ip)[2], ((unsigned char *)&from.ip)[3], ntohs(port) >> 8, ntohs(port) & 0xFF); } else if (cp->app_data == (void *) IP_VS_FTP_EPSV) { from = n_cp->vaddr; port = n_cp->vport; /* Only port, client will use VIP for the data connection */ snprintf(buf, sizeof(buf), "|||%u|", ntohs(port)); } else { *buf = 0; } buf_len = strlen(buf); ct = nf_ct_get(skb, &ctinfo); if (ct) { bool mangled; /* If mangling fails this function will return 0 * which will cause the packet to be dropped. * Mangling can only fail under memory pressure, * hopefully it will succeed on the retransmitted * packet. */ mangled = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, ipvsh->len, start - data, end - start, buf, buf_len); if (mangled) { ip_vs_nfct_expect_related(skb, ct, n_cp, ipvsh->protocol, 0, 0); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_UNNECESSARY; /* csum is updated */ ret = 1; } } /* Not setting 'diff' is intentional, otherwise the sequence * would be adjusted twice. */ cp->app_data = (void *) IP_VS_FTP_ACTIVE; ip_vs_tcp_conn_listen(n_cp); ip_vs_conn_put(n_cp); return ret; } /* Look at incoming ftp packets to catch the PASV/PORT/EPRT/EPSV command * (outside-to-inside). * * The incoming packet having the PORT command should be something like * "PORT xxx,xxx,xxx,xxx,ppp,ppp\n". * xxx,xxx,xxx,xxx is the client address, ppp,ppp is the client port number. * In this case, we create a connection entry using the client address and * port, so that the active ftp data connection from the server can reach * the client. 
* Extended format: * "EPSV\r\n" when client requests server address from same family * "EPSV 1\r\n" when client requests IPv4 server address * "EPSV 2\r\n" when client requests IPv6 server address * "EPSV ALL\r\n" - not supported * EPRT with specified delimiter (ASCII 33..126), "|" by default: * "EPRT |1|IPv4ADDR|PORT|\r\n" when client provides IPv4 addrport * "EPRT |2|IPv6ADDR|PORT|\r\n" when client provides IPv6 addrport */ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, struct sk_buff *skb, int *diff, struct ip_vs_iphdr *ipvsh) { char *data, *data_start, *data_limit; char *start, *end; union nf_inet_addr to; __be16 port; struct ip_vs_conn *n_cp; /* no diff required for incoming packets */ *diff = 0; /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; /* Linear packets are much easier to deal with. */ if (skb_ensure_writable(skb, skb->len)) return 0; data = data_start = ip_vs_ftp_data_ptr(skb, ipvsh); data_limit = skb_tail_pointer(skb); if (!data || data >= data_limit) return 1; while (data <= data_limit - 6) { if (cp->af == AF_INET && strncasecmp(data, "PASV\r\n", 6) == 0) { /* Passive mode on */ IP_VS_DBG(7, "got PASV at %td of %td\n", data - data_start, data_limit - data_start); cp->app_data = (void *) IP_VS_FTP_PASV; return 1; } /* EPSV or EPSV<space><net-prt> */ if (strncasecmp(data, "EPSV", 4) == 0 && (data[4] == ' ' || data[4] == '\r')) { if (data[4] == ' ') { char proto = data[5]; if (data > data_limit - 7 || data[6] != '\r') return 1; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6 && proto == '2') { } else #endif if (cp->af == AF_INET && proto == '1') { } else { return 1; } } /* Extended Passive mode on */ IP_VS_DBG(7, "got EPSV at %td of %td\n", data - data_start, data_limit - data_start); cp->app_data = (void *) IP_VS_FTP_EPSV; return 1; } data++; } /* * To support virtual FTP server, the scenario is as follows: * FTP client ----> Load Balancer ----> FTP server * First detect the port number in the application data, * then create a new connection entry for the coming data * connection.
*/ if (cp->af == AF_INET && ip_vs_ftp_get_addrport(data_start, data_limit, CLIENT_STRING_PORT, sizeof(CLIENT_STRING_PORT)-1, ' ', false, IP_VS_FTP_PORT, &to, &port, cp->af, &start, &end) == 1) { IP_VS_DBG(7, "PORT %pI4:%u detected\n", &to.ip, ntohs(port)); /* Now update or create a connection entry for it */ IP_VS_DBG(7, "protocol %s %pI4:%u %pI4:%u\n", ip_vs_proto_name(ipvsh->protocol), &to.ip, ntohs(port), &cp->vaddr.ip, ntohs(cp->vport)-1); } else if (ip_vs_ftp_get_addrport(data_start, data_limit, CLIENT_STRING_EPRT, sizeof(CLIENT_STRING_EPRT)-1, ' ', true, IP_VS_FTP_EPRT, &to, &port, cp->af, &start, &end) == 1) { IP_VS_DBG_BUF(7, "EPRT %s:%u detected\n", IP_VS_DBG_ADDR(cp->af, &to), ntohs(port)); /* Now update or create a connection entry for it */ IP_VS_DBG_BUF(7, "protocol %s %s:%u %s:%u\n", ip_vs_proto_name(ipvsh->protocol), IP_VS_DBG_ADDR(cp->af, &to), ntohs(port), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport)-1); } else { return 1; } /* Passive mode off */ cp->app_data = (void *) IP_VS_FTP_ACTIVE; { struct ip_vs_conn_param p; ip_vs_conn_fill_param(cp->ipvs, cp->af, ipvsh->protocol, &to, port, &cp->vaddr, htons(ntohs(cp->vport)-1), &p); n_cp = ip_vs_conn_in_get(&p); if (!n_cp) { n_cp = ip_vs_conn_new(&p, cp->af, &cp->daddr, htons(ntohs(cp->dport)-1), IP_VS_CONN_F_NFCT, cp->dest, skb->mark); if (!n_cp) return 0; /* add its controller */ ip_vs_control_add(n_cp, cp); } } /* * Move tunnel to listen state */ ip_vs_tcp_conn_listen(n_cp); ip_vs_conn_put(n_cp); return 1; } static struct ip_vs_app ip_vs_ftp = { .name = "ftp", .type = IP_VS_APP_TYPE_FTP, .protocol = IPPROTO_TCP, .module = THIS_MODULE, .incs_list = LIST_HEAD_INIT(ip_vs_ftp.incs_list), .init_conn = ip_vs_ftp_init_conn, .done_conn = ip_vs_ftp_done_conn, .bind_conn = NULL, .unbind_conn = NULL, .pkt_out = ip_vs_ftp_out, .pkt_in = ip_vs_ftp_in, }; /* * per netns ip_vs_ftp initialization */ static int __net_init __ip_vs_ftp_init(struct net *net) { int i, ret; struct ip_vs_app *app; struct netns_ipvs *ipvs = net_ipvs(net); if (!ipvs) return -ENOENT; app = register_ip_vs_app(ipvs, &ip_vs_ftp); if (IS_ERR(app)) return PTR_ERR(app); for (i = 0; i < ports_count; i++) { if (!ports[i]) continue; ret = register_ip_vs_app_inc(ipvs, app, app->protocol, ports[i]); if (ret) goto err_unreg; } return 0; err_unreg: unregister_ip_vs_app(ipvs, &ip_vs_ftp); return ret; } /* * netns exit */ static void __ip_vs_ftp_exit(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); if (!ipvs) return; unregister_ip_vs_app(ipvs, &ip_vs_ftp); } static struct pernet_operations ip_vs_ftp_ops = { .init = __ip_vs_ftp_init, .exit = __ip_vs_ftp_exit, }; static int __init ip_vs_ftp_init(void) { /* rcu_barrier() is called by netns on error */ return register_pernet_subsys(&ip_vs_ftp_ops); } /* * ip_vs_ftp finish. */ static void __exit ip_vs_ftp_exit(void) { unregister_pernet_subsys(&ip_vs_ftp_ops); /* rcu_barrier() is called by netns */ } module_init(ip_vs_ftp_init); module_exit(ip_vs_ftp_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ipvs ftp helper");
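/*
 * A small standalone illustration of the PASV address/port encoding
 * that ip_vs_ftp_get_addrport() and the rewrite above operate on: a
 * "227 Entering Passive Mode" reply carries "h1,h2,h3,h4,p1,p2", the
 * IPv4 address as four octets and the data port as p1 * 256 + p2.
 * The sample reply string is made up.
 */
#include <stdio.h>

int main(void)
{
        const char *reply = "227 Entering Passive Mode (192,168,10,5,19,137)";
        unsigned int h[4], p[2];

        if (sscanf(reply, "227 Entering Passive Mode (%u,%u,%u,%u,%u,%u)",
                   &h[0], &h[1], &h[2], &h[3], &p[0], &p[1]) != 6) {
                fprintf(stderr, "unexpected PASV reply\n");
                return 1;
        }
        /* 19 * 256 + 137 = 5001 */
        printf("data connection to %u.%u.%u.%u port %u\n",
               h[0], h[1], h[2], h[3], p[0] * 256 + p[1]);
        return 0;
}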
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API. * * HMAC: Keyed-Hashing for Message Authentication (RFC2104). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * * The HMAC implementation is derived from USAGI. * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI */ #include <crypto/hmac.h> #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/fips.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/string.h> struct hmac_ctx { struct crypto_shash *hash; /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ u8 pads[]; }; static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); struct hmac_ctx *tctx = crypto_shash_ctx(parent); struct crypto_shash *hash = tctx->hash; u8 *ipad = &tctx->pads[0]; u8 *opad = &tctx->pads[ss]; SHASH_DESC_ON_STACK(shash, hash); unsigned int i; if (fips_enabled && (keylen < 112 / 8)) return -EINVAL; shash->tfm = hash; if (keylen > bs) { int err; err = crypto_shash_digest(shash, inkey, keylen, ipad); if (err) return err; keylen = ds; } else memcpy(ipad, inkey, keylen); memset(ipad + keylen, 0, bs - keylen); memcpy(opad, ipad, bs); for (i = 0; i < bs; i++) { ipad[i] ^= HMAC_IPAD_VALUE; opad[i] ^= HMAC_OPAD_VALUE; } return crypto_shash_init(shash) ?: crypto_shash_update(shash, ipad, bs) ?: crypto_shash_export(shash, ipad) ?: crypto_shash_init(shash) ?: crypto_shash_update(shash, opad, bs) ?: crypto_shash_export(shash, opad); } static int hmac_export(struct shash_desc *pdesc, void *out) { struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_export(desc, out); } static int hmac_import(struct shash_desc *pdesc, const void *in) { struct shash_desc *desc = shash_desc_ctx(pdesc); const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); desc->tfm = tctx->hash; return crypto_shash_import(desc, in); } static int hmac_init(struct shash_desc *pdesc) { const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); return hmac_import(pdesc, &tctx->pads[0]); } static int hmac_update(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes) { struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_update(desc, data, nbytes); } static int hmac_final(struct shash_desc
*pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); const struct hmac_ctx *tctx = crypto_shash_ctx(parent); const u8 *opad = &tctx->pads[ss]; struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_final(desc, out) ?: crypto_shash_import(desc, opad) ?: crypto_shash_finup(desc, out, ds, out); } static int hmac_finup(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes, u8 *out) { struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); const struct hmac_ctx *tctx = crypto_shash_ctx(parent); const u8 *opad = &tctx->pads[ss]; struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_finup(desc, data, nbytes, out) ?: crypto_shash_import(desc, opad) ?: crypto_shash_finup(desc, out, ds, out); } static int hmac_init_tfm(struct crypto_shash *parent) { struct crypto_shash *hash; struct shash_instance *inst = shash_alg_instance(parent); struct crypto_shash_spawn *spawn = shash_instance_ctx(inst); struct hmac_ctx *tctx = crypto_shash_ctx(parent); hash = crypto_spawn_shash(spawn); if (IS_ERR(hash)) return PTR_ERR(hash); parent->descsize = sizeof(struct shash_desc) + crypto_shash_descsize(hash); tctx->hash = hash; return 0; } static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src) { struct hmac_ctx *sctx = crypto_shash_ctx(src); struct hmac_ctx *dctx = crypto_shash_ctx(dst); struct crypto_shash *hash; hash = crypto_clone_shash(sctx->hash); if (IS_ERR(hash)) return PTR_ERR(hash); dctx->hash = hash; return 0; } static void hmac_exit_tfm(struct crypto_shash *parent) { struct hmac_ctx *tctx = crypto_shash_ctx(parent); crypto_free_shash(tctx->hash); } static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; u32 mask; int err; int ds; int ss; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = shash_instance_ctx(inst); err = crypto_grab_shash(spawn, shash_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; salg = crypto_spawn_shash_alg(spawn); alg = &salg->base; /* The underlying hash algorithm must not require a key */ err = -EINVAL; if (crypto_shash_alg_needs_key(salg)) goto err_free_inst; ds = salg->digestsize; ss = salg->statesize; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) goto err_free_inst; err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); if (err) goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + (ss * 2); inst->alg.digestsize = ds; inst->alg.statesize = ss; inst->alg.init = hmac_init; inst->alg.update = hmac_update; inst->alg.final = hmac_final; inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; inst->alg.import = hmac_import; inst->alg.setkey = hmac_setkey; inst->alg.init_tfm = hmac_init_tfm; inst->alg.clone_tfm = hmac_clone_tfm; inst->alg.exit_tfm = hmac_exit_tfm; inst->free = shash_free_singlespawn_instance; err = shash_register_instance(tmpl, inst); if (err) { err_free_inst: shash_free_singlespawn_instance(inst); } return err; } static struct crypto_template hmac_tmpl = { .name = "hmac", .create = hmac_create, .module = 
THIS_MODULE, }; static int __init hmac_module_init(void) { return crypto_register_template(&hmac_tmpl); } static void __exit hmac_module_exit(void) { crypto_unregister_template(&hmac_tmpl); } subsys_initcall(hmac_module_init); module_exit(hmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("HMAC hash algorithm"); MODULE_ALIAS_CRYPTO("hmac");
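/*
 * A standalone sketch of the RFC 2104 key preprocessing that
 * hmac_setkey() performs before exporting the partial hash states:
 * the key is zero-padded to the hash block size and XORed with 0x36
 * (HMAC_IPAD_VALUE) and 0x5c (HMAC_OPAD_VALUE), so that
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).  A 64-byte block
 * (e.g. SHA-256) and a short demo key are assumed here; a key longer
 * than the block size would first be hashed down to the digest size,
 * which is omitted in this sketch.
 */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64   /* assumed block size, e.g. SHA-256 */

int main(void)
{
        const unsigned char key[] = "demo-key";
        unsigned char ipad[BLOCK_SIZE], opad[BLOCK_SIZE];
        size_t i, keylen = sizeof(key) - 1;

        /* zero-pad the key to one hash block, as hmac_setkey() does */
        memset(ipad, 0, sizeof(ipad));
        memcpy(ipad, key, keylen);
        memcpy(opad, ipad, sizeof(ipad));

        for (i = 0; i < BLOCK_SIZE; i++) {
                ipad[i] ^= 0x36;        /* HMAC_IPAD_VALUE */
                opad[i] ^= 0x5c;        /* HMAC_OPAD_VALUE */
        }

        printf("ipad[0..3] = %02x %02x %02x %02x\n",
               ipad[0], ipad[1], ipad[2], ipad[3]);
        printf("opad[0..3] = %02x %02x %02x %02x\n",
               opad[0], opad[1], opad[2], opad[3]);
        return 0;
}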
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 */ #include <linux/fs.h> #include <linux/module.h> #include <linux/parser.h> #include <linux/completion.h> #include <linux/vfs.h> #include <linux/quotaops.h> #include <linux/mount.h> #include <linux/moduleparam.h> #include <linux/kthread.h> #include <linux/posix_acl.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/crc32.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/seq_file.h> #include <linux/blkdev.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_inode.h" #include "jfs_metapage.h" #include "jfs_superblock.h" #include "jfs_dmap.h" #include "jfs_imap.h" #include "jfs_acl.h" #include "jfs_debug.h" #include "jfs_xattr.h" #include "jfs_dinode.h" MODULE_DESCRIPTION("The Journaled Filesystem (JFS)"); MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); MODULE_LICENSE("GPL"); static struct kmem_cache *jfs_inode_cachep; static const struct super_operations jfs_super_operations; static const struct export_operations jfs_export_operations; static struct file_system_type jfs_fs_type; #define MAX_COMMIT_THREADS 64 static int commit_threads; module_param(commit_threads, int, 0); MODULE_PARM_DESC(commit_threads, "Number of commit threads"); static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS]; struct task_struct *jfsIOthread; struct task_struct *jfsSyncThread; #ifdef CONFIG_JFS_DEBUG int jfsloglevel = JFS_LOGLEVEL_WARN; module_param(jfsloglevel, int, 0644); MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)"); #endif static void jfs_handle_error(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); if (sb_rdonly(sb)) return; updateSuper(sb, FM_DIRTY); if (sbi->flag & JFS_ERR_PANIC) panic("JFS (device %s): panic forced after error\n", sb->s_id); else if (sbi->flag & JFS_ERR_REMOUNT_RO) { jfs_err("ERROR: (device %s): remounting filesystem as read-only", sb->s_id); sb->s_flags |= SB_RDONLY; } /* nothing is done for continue beyond marking the superblock dirty */ } void jfs_error(struct super_block *sb, const char *fmt, ...)
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("ERROR: (device %s): %ps: %pV\n", sb->s_id, __builtin_return_address(0), &vaf); va_end(args); jfs_handle_error(sb); } static struct inode *jfs_alloc_inode(struct super_block *sb) { struct jfs_inode_info *jfs_inode; jfs_inode = alloc_inode_sb(sb, jfs_inode_cachep, GFP_NOFS); if (!jfs_inode) return NULL; #ifdef CONFIG_QUOTA memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot)); #endif return &jfs_inode->vfs_inode; } static void jfs_free_inode(struct inode *inode) { kmem_cache_free(jfs_inode_cachep, JFS_IP(inode)); } static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); s64 maxinodes; struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap; jfs_info("In jfs_statfs"); buf->f_type = JFS_SUPER_MAGIC; buf->f_bsize = sbi->bsize; buf->f_blocks = sbi->bmap->db_mapsize; buf->f_bfree = sbi->bmap->db_nfree; buf->f_bavail = sbi->bmap->db_nfree; /* * If we really return the number of allocated & free inodes, some * applications will fail because they won't see enough free inodes. * We'll try to calculate some guess as to how many inodes we can * really allocate * * buf->f_files = atomic_read(&imap->im_numinos); * buf->f_ffree = atomic_read(&imap->im_numfree); */ maxinodes = min((s64) atomic_read(&imap->im_numinos) + ((sbi->bmap->db_nfree >> imap->im_l2nbperiext) << L2INOSPEREXT), (s64) 0xffffffffLL); buf->f_files = maxinodes; buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) - atomic_read(&imap->im_numfree)); buf->f_fsid.val[0] = crc32_le(0, (char *)&sbi->uuid, sizeof(sbi->uuid)/2); buf->f_fsid.val[1] = crc32_le(0, (char *)&sbi->uuid + sizeof(sbi->uuid)/2, sizeof(sbi->uuid)/2); buf->f_namelen = JFS_NAME_MAX; return 0; } #ifdef CONFIG_QUOTA static int jfs_quota_off(struct super_block *sb, int type); static int jfs_quota_on(struct super_block *sb, int type, int format_id, const struct path *path); static void jfs_quota_off_umount(struct super_block *sb) { int type; for (type = 0; type < MAXQUOTAS; type++) jfs_quota_off(sb, type); } static const struct quotactl_ops jfs_quotactl_ops = { .quota_on = jfs_quota_on, .quota_off = jfs_quota_off, .quota_sync = dquot_quota_sync, .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk, .get_nextdqblk = dquot_get_next_dqblk, }; #else static inline void jfs_quota_off_umount(struct super_block *sb) { } #endif static void jfs_put_super(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); int rc; jfs_info("In jfs_put_super"); jfs_quota_off_umount(sb); rc = jfs_umount(sb); if (rc) jfs_err("jfs_umount failed with return code %d", rc); unload_nls(sbi->nls_tab); truncate_inode_pages(sbi->direct_inode->i_mapping, 0); iput(sbi->direct_inode); kfree(sbi); } enum { Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask, Opt_discard, Opt_nodiscard, Opt_discard_minblk }; static const match_table_t tokens = { {Opt_integrity, "integrity"}, {Opt_nointegrity, "nointegrity"}, {Opt_iocharset, "iocharset=%s"}, {Opt_resize, "resize=%u"}, {Opt_resize_nosize, "resize"}, {Opt_errors, "errors=%s"}, {Opt_ignore, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_grpquota, "grpquota"}, {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_umask, "umask=%u"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, 
{Opt_discard_minblk, "discard=%u"}, {Opt_err, NULL} }; static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, int *flag) { void *nls_map = (void *)-1; /* -1: no change; NULL: none */ char *p; struct jfs_sb_info *sbi = JFS_SBI(sb); *newLVSize = 0; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_integrity: *flag &= ~JFS_NOINTEGRITY; break; case Opt_nointegrity: *flag |= JFS_NOINTEGRITY; break; case Opt_ignore: /* Silently ignore the quota options */ /* Don't do anything ;-) */ break; case Opt_iocharset: if (nls_map && nls_map != (void *) -1) unload_nls(nls_map); if (!strcmp(args[0].from, "none")) nls_map = NULL; else { nls_map = load_nls(args[0].from); if (!nls_map) { pr_err("JFS: charset not found\n"); goto cleanup; } } break; case Opt_resize: { char *resize = args[0].from; int rc = kstrtoll(resize, 0, newLVSize); if (rc) goto cleanup; break; } case Opt_resize_nosize: { *newLVSize = sb_bdev_nr_blocks(sb); if (*newLVSize == 0) pr_err("JFS: Cannot determine volume size\n"); break; } case Opt_errors: { char *errors = args[0].from; if (!errors || !*errors) goto cleanup; if (!strcmp(errors, "continue")) { *flag &= ~JFS_ERR_REMOUNT_RO; *flag &= ~JFS_ERR_PANIC; *flag |= JFS_ERR_CONTINUE; } else if (!strcmp(errors, "remount-ro")) { *flag &= ~JFS_ERR_CONTINUE; *flag &= ~JFS_ERR_PANIC; *flag |= JFS_ERR_REMOUNT_RO; } else if (!strcmp(errors, "panic")) { *flag &= ~JFS_ERR_CONTINUE; *flag &= ~JFS_ERR_REMOUNT_RO; *flag |= JFS_ERR_PANIC; } else { pr_err("JFS: %s is an invalid error handler\n", errors); goto cleanup; } break; } #ifdef CONFIG_QUOTA case Opt_quota: case Opt_usrquota: *flag |= JFS_USRQUOTA; break; case Opt_grpquota: *flag |= JFS_GRPQUOTA; break; #else case Opt_usrquota: case Opt_grpquota: case Opt_quota: pr_err("JFS: quota operations not supported\n"); break; #endif case Opt_uid: { char *uid = args[0].from; uid_t val; int rc = kstrtouint(uid, 0, &val); if (rc) goto cleanup; sbi->uid = make_kuid(current_user_ns(), val); if (!uid_valid(sbi->uid)) goto cleanup; break; } case Opt_gid: { char *gid = args[0].from; gid_t val; int rc = kstrtouint(gid, 0, &val); if (rc) goto cleanup; sbi->gid = make_kgid(current_user_ns(), val); if (!gid_valid(sbi->gid)) goto cleanup; break; } case Opt_umask: { char *umask = args[0].from; int rc = kstrtouint(umask, 8, &sbi->umask); if (rc) goto cleanup; if (sbi->umask & ~0777) { pr_err("JFS: Invalid value of umask\n"); goto cleanup; } break; } case Opt_discard: /* if set to 1, even copying files will cause * trimming :O * -> user has more control over the online trimming */ sbi->minblks_trim = 64; if (bdev_max_discard_sectors(sb->s_bdev)) *flag |= JFS_DISCARD; else pr_err("JFS: discard option not supported on device\n"); break; case Opt_nodiscard: *flag &= ~JFS_DISCARD; break; case Opt_discard_minblk: { char *minblks_trim = args[0].from; int rc; if (bdev_max_discard_sectors(sb->s_bdev)) { *flag |= JFS_DISCARD; rc = kstrtouint(minblks_trim, 0, &sbi->minblks_trim); if (rc) goto cleanup; } else pr_err("JFS: discard option not supported on device\n"); break; } default: printk("jfs: Unrecognized mount option \"%s\" or missing value\n", p); goto cleanup; } } if (nls_map != (void *) -1) { /* Discard old (if remount) */ unload_nls(sbi->nls_tab); sbi->nls_tab = nls_map; } return 1; cleanup: if (nls_map && nls_map != (void *) -1) unload_nls(nls_map); return 0; } static int jfs_remount(struct super_block *sb, 
int *flags, char *data) { s64 newLVSize = 0; int rc = 0; int flag = JFS_SBI(sb)->flag; int ret; sync_filesystem(sb); if (!parse_options(data, sb, &newLVSize, &flag)) return -EINVAL; if (newLVSize) { if (sb_rdonly(sb)) { pr_err("JFS: resize requires volume to be mounted read-write\n"); return -EROFS; } rc = jfs_extendfs(sb, newLVSize, 0); if (rc) return rc; } if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) { /* * Invalidate any previously read metadata. fsck may have * changed the on-disk data since we mounted r/o */ truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0); JFS_SBI(sb)->flag = flag; ret = jfs_mount_rw(sb, 1); /* mark the fs r/w for quota activity */ sb->s_flags &= ~SB_RDONLY; dquot_resume(sb, -1); return ret; } if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) { rc = dquot_suspend(sb, -1); if (rc < 0) return rc; rc = jfs_umount_rw(sb); JFS_SBI(sb)->flag = flag; return rc; } if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY)) if (!sb_rdonly(sb)) { rc = jfs_umount_rw(sb); if (rc) return rc; JFS_SBI(sb)->flag = flag; ret = jfs_mount_rw(sb, 1); return ret; } JFS_SBI(sb)->flag = flag; return 0; } static int jfs_fill_super(struct super_block *sb, void *data, int silent) { struct jfs_sb_info *sbi; struct inode *inode; int rc; s64 newLVSize = 0; int flag, ret = -EINVAL; jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; sb->s_max_links = JFS_LINK_MAX; sb->s_time_min = 0; sb->s_time_max = U32_MAX; sbi->sb = sb; sbi->uid = INVALID_UID; sbi->gid = INVALID_GID; sbi->umask = -1; /* initialize the mount flag and determine the default error handler */ flag = JFS_ERR_REMOUNT_RO; if (!parse_options((char *) data, sb, &newLVSize, &flag)) goto out_kfree; sbi->flag = flag; #ifdef CONFIG_JFS_POSIX_ACL sb->s_flags |= SB_POSIXACL; #endif if (newLVSize) { pr_err("resize option for remount only\n"); goto out_kfree; } /* * Initialize blocksize to 4K. */ sb_set_blocksize(sb, PSIZE); /* * Set method vectors. */ sb->s_op = &jfs_super_operations; sb->s_export_op = &jfs_export_operations; sb->s_xattr = jfs_xattr_handlers; #ifdef CONFIG_QUOTA sb->dq_op = &dquot_operations; sb->s_qcop = &jfs_quotactl_ops; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; #endif /* * Initialize direct-mapping inode/address-space */ inode = new_inode(sb); if (inode == NULL) { ret = -ENOMEM; goto out_unload; } inode->i_size = bdev_nr_bytes(sb->s_bdev); inode->i_mapping->a_ops = &jfs_metapage_aops; inode_fake_hash(inode); mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); sbi->direct_inode = inode; rc = jfs_mount(sb); if (rc) { if (!silent) jfs_err("jfs_mount failed w/return code = %d", rc); goto out_mount_failed; } if (sb_rdonly(sb)) sbi->log = NULL; else { rc = jfs_mount_rw(sb, 0); if (rc) { if (!silent) { jfs_err("jfs_mount_rw failed, return code = %d", rc); } goto out_no_rw; } } sb->s_magic = JFS_SUPER_MAGIC; if (sbi->mntflag & JFS_OS2) sb->s_d_op = &jfs_ci_dentry_operations; inode = jfs_iget(sb, ROOT_I); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto out_no_rw; } sb->s_root = d_make_root(inode); if (!sb->s_root) goto out_no_root; /* logical blocks are represented by 40 bits in pxd_t, etc. 
* and page cache is indexed by long */ sb->s_maxbytes = min(((loff_t)sb->s_blocksize) << 40, MAX_LFS_FILESIZE); sb->s_time_gran = 1; return 0; out_no_root: jfs_err("jfs_read_super: get root dentry failed"); out_no_rw: rc = jfs_umount(sb); if (rc) jfs_err("jfs_umount failed with return code %d", rc); out_mount_failed: filemap_write_and_wait(sbi->direct_inode->i_mapping); truncate_inode_pages(sbi->direct_inode->i_mapping, 0); make_bad_inode(sbi->direct_inode); iput(sbi->direct_inode); sbi->direct_inode = NULL; out_unload: unload_nls(sbi->nls_tab); out_kfree: kfree(sbi); return ret; } static int jfs_freeze(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_log *log = sbi->log; int rc = 0; if (!sb_rdonly(sb)) { txQuiesce(sb); rc = lmLogShutdown(log); if (rc) { jfs_error(sb, "lmLogShutdown failed\n"); /* let operations fail rather than hang */ txResume(sb); return rc; } rc = updateSuper(sb, FM_CLEAN); if (rc) { jfs_err("jfs_freeze: updateSuper failed"); /* * Don't fail here. Everything succeeded except * marking the superblock clean, so there's really * no harm in leaving it frozen for now. */ } } return 0; } static int jfs_unfreeze(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_log *log = sbi->log; int rc = 0; if (!sb_rdonly(sb)) { rc = updateSuper(sb, FM_MOUNT); if (rc) { jfs_error(sb, "updateSuper failed\n"); goto out; } rc = lmLogInit(log); if (rc) jfs_error(sb, "lmLogInit failed\n"); out: txResume(sb); } return rc; } static struct dentry *jfs_do_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super); } static int jfs_sync_fs(struct super_block *sb, int wait) { struct jfs_log *log = JFS_SBI(sb)->log; /* log == NULL indicates read-only mount */ if (log) { /* * Write quota structures to quota file, sync_blockdev() will * write them to disk later */ dquot_writeback_dquots(sb, -1); jfs_flush_journal(log, wait); jfs_syncpt(log, 0); } return 0; } static int jfs_show_options(struct seq_file *seq, struct dentry *root) { struct jfs_sb_info *sbi = JFS_SBI(root->d_sb); if (uid_valid(sbi->uid)) seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid)); if (gid_valid(sbi->gid)) seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid)); if (sbi->umask != -1) seq_printf(seq, ",umask=%03o", sbi->umask); if (sbi->flag & JFS_NOINTEGRITY) seq_puts(seq, ",nointegrity"); if (sbi->flag & JFS_DISCARD) seq_printf(seq, ",discard=%u", sbi->minblks_trim); if (sbi->nls_tab) seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset); if (sbi->flag & JFS_ERR_CONTINUE) seq_printf(seq, ",errors=continue"); if (sbi->flag & JFS_ERR_PANIC) seq_printf(seq, ",errors=panic"); #ifdef CONFIG_QUOTA if (sbi->flag & JFS_USRQUOTA) seq_puts(seq, ",usrquota"); if (sbi->flag & JFS_GRPQUOTA) seq_puts(seq, ",grpquota"); #endif return 0; } #ifdef CONFIG_QUOTA /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... 
As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> sb->s_blocksize_bits; int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head tmp_bh; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = min_t(size_t, sb->s_blocksize - offset, toread); tmp_bh.b_state = 0; tmp_bh.b_size = i_blocksize(inode); err = jfs_get_block(inode, blk, &tmp_bh, 0); if (err) return err; if (!buffer_mapped(&tmp_bh)) /* A hole? */ memset(data, 0, tocopy); else { bh = sb_bread(sb, tmp_bh.b_blocknr); if (!bh) return -EIO; memcpy(data, bh->b_data+offset, tocopy); brelse(bh); } offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile */ static ssize_t jfs_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> sb->s_blocksize_bits; int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t towrite = len; struct buffer_head tmp_bh; struct buffer_head *bh; inode_lock(inode); while (towrite > 0) { tocopy = min_t(size_t, sb->s_blocksize - offset, towrite); tmp_bh.b_state = 0; tmp_bh.b_size = i_blocksize(inode); err = jfs_get_block(inode, blk, &tmp_bh, 1); if (err) goto out; if (offset || tocopy != sb->s_blocksize) bh = sb_bread(sb, tmp_bh.b_blocknr); else bh = sb_getblk(sb, tmp_bh.b_blocknr); if (!bh) { err = -EIO; goto out; } lock_buffer(bh); memcpy(bh->b_data+offset, data, tocopy); flush_dcache_page(bh->b_page); set_buffer_uptodate(bh); mark_buffer_dirty(bh); unlock_buffer(bh); brelse(bh); offset = 0; towrite -= tocopy; data += tocopy; blk++; } out: if (len == towrite) { inode_unlock(inode); return err; } if (inode->i_size < off+len-towrite) i_size_write(inode, off+len-towrite); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); mark_inode_dirty(inode); inode_unlock(inode); return len - towrite; } static struct dquot __rcu **jfs_get_dquots(struct inode *inode) { return JFS_IP(inode)->i_dquot; } static int jfs_quota_on(struct super_block *sb, int type, int format_id, const struct path *path) { int err; struct inode *inode; err = dquot_quota_on(sb, type, format_id, path); if (err) return err; inode = d_inode(path->dentry); inode_lock(inode); JFS_IP(inode)->mode2 |= JFS_NOATIME_FL | JFS_IMMUTABLE_FL; inode_set_flags(inode, S_NOATIME | S_IMMUTABLE, S_NOATIME | S_IMMUTABLE); inode_unlock(inode); mark_inode_dirty(inode); return 0; } static int jfs_quota_off(struct super_block *sb, int type) { struct inode *inode = sb_dqopt(sb)->files[type]; int err; if (!inode || !igrab(inode)) goto out; err = dquot_quota_off(sb, type); if (err) goto out_put; inode_lock(inode); JFS_IP(inode)->mode2 &= ~(JFS_NOATIME_FL | JFS_IMMUTABLE_FL); inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE); inode_unlock(inode); mark_inode_dirty(inode); out_put: iput(inode); return err; out: return dquot_quota_off(sb, type); } #endif static const struct super_operations jfs_super_operations = { .alloc_inode = jfs_alloc_inode, .free_inode = jfs_free_inode, .dirty_inode = jfs_dirty_inode, .write_inode = jfs_write_inode, .evict_inode = jfs_evict_inode, .put_super 
= jfs_put_super, .sync_fs = jfs_sync_fs, .freeze_fs = jfs_freeze, .unfreeze_fs = jfs_unfreeze, .statfs = jfs_statfs, .remount_fs = jfs_remount, .show_options = jfs_show_options, #ifdef CONFIG_QUOTA .quota_read = jfs_quota_read, .quota_write = jfs_quota_write, .get_dquots = jfs_get_dquots, #endif }; static const struct export_operations jfs_export_operations = { .encode_fh = generic_encode_ino32_fh, .fh_to_dentry = jfs_fh_to_dentry, .fh_to_parent = jfs_fh_to_parent, .get_parent = jfs_get_parent, }; static struct file_system_type jfs_fs_type = { .owner = THIS_MODULE, .name = "jfs", .mount = jfs_do_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("jfs"); static void init_once(void *foo) { struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); INIT_LIST_HEAD(&jfs_ip->anon_inode_list); init_rwsem(&jfs_ip->rdwrlock); mutex_init(&jfs_ip->commit_mutex); init_rwsem(&jfs_ip->xattr_sem); spin_lock_init(&jfs_ip->ag_lock); jfs_ip->active_ag = -1; inode_init_once(&jfs_ip->vfs_inode); } static int __init init_jfs_fs(void) { int i; int rc; jfs_inode_cachep = kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, offsetof(struct jfs_inode_info, i_inline_all), sizeof_field(struct jfs_inode_info, i_inline_all), init_once); if (jfs_inode_cachep == NULL) return -ENOMEM; /* * Metapage initialization */ rc = metapage_init(); if (rc) { jfs_err("metapage_init failed w/rc = %d", rc); goto free_slab; } /* * Transaction Manager initialization */ rc = txInit(); if (rc) { jfs_err("txInit failed w/rc = %d", rc); goto free_metapage; } /* * I/O completion thread (endio) */ jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO"); if (IS_ERR(jfsIOthread)) { rc = PTR_ERR(jfsIOthread); jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); goto end_txmngr; } if (commit_threads < 1) commit_threads = num_online_cpus(); if (commit_threads > MAX_COMMIT_THREADS) commit_threads = MAX_COMMIT_THREADS; for (i = 0; i < commit_threads; i++) { jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit"); if (IS_ERR(jfsCommitThread[i])) { rc = PTR_ERR(jfsCommitThread[i]); jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); commit_threads = i; goto kill_committask; } } jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync"); if (IS_ERR(jfsSyncThread)) { rc = PTR_ERR(jfsSyncThread); jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); goto kill_committask; } #ifdef PROC_FS_JFS jfs_proc_init(); #endif rc = register_filesystem(&jfs_fs_type); if (!rc) return 0; #ifdef PROC_FS_JFS jfs_proc_clean(); #endif kthread_stop(jfsSyncThread); kill_committask: for (i = 0; i < commit_threads; i++) kthread_stop(jfsCommitThread[i]); kthread_stop(jfsIOthread); end_txmngr: txExit(); free_metapage: metapage_exit(); free_slab: kmem_cache_destroy(jfs_inode_cachep); return rc; } static void __exit exit_jfs_fs(void) { int i; jfs_info("exit_jfs_fs called"); txExit(); metapage_exit(); kthread_stop(jfsIOthread); for (i = 0; i < commit_threads; i++) kthread_stop(jfsCommitThread[i]); kthread_stop(jfsSyncThread); #ifdef PROC_FS_JFS jfs_proc_clean(); #endif unregister_filesystem(&jfs_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(jfs_inode_cachep); } module_init(init_jfs_fs) module_exit(exit_jfs_fs)
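The quota helpers above walk the quota file block by block: only the first block may start at a non-zero in-block offset, each chunk is capped at min(blocksize - offset, remaining), and the offset is reset to zero after the first iteration. The stand-alone userspace sketch below reproduces just that chunking arithmetic; the 4 KiB block size and the request values are illustrative assumptions, not taken from JFS.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical userspace sketch: split an (off, len) quota-file request into
 * per-block chunks the way jfs_quota_read()/jfs_quota_write() do. */
int main(void)
{
	const size_t blocksize = 4096;          /* assumed block size */
	long long off = 4000;                   /* starting byte offset (made up) */
	size_t len = 10000;                     /* requested length (made up) */
	long long blk = off / blocksize;        /* like: blk = off >> s_blocksize_bits */
	size_t offset = off % blocksize;        /* offset within the first block */
	size_t toread = len;

	while (toread > 0) {
		/* like: min_t(size_t, sb->s_blocksize - offset, toread) */
		size_t tocopy = (blocksize - offset < toread) ? blocksize - offset : toread;

		printf("block %lld: copy %zu bytes starting at in-block offset %zu\n",
		       blk, tocopy, offset);

		offset = 0;     /* only the first block starts mid-block */
		toread -= tocopy;
		blk++;
	}
	return 0;
}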
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * IB infrastructure: * Establish SMC-R as an Infiniband Client to be notified about added and * removed IB devices of type RDMA. * Determine device and port characteristics for these IB devices. * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com> */ #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/random.h> #include <linux/workqueue.h> #include <linux/scatterlist.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/inetdevice.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include "smc_pnet.h" #include "smc_ib.h" #include "smc_core.h" #include "smc_wr.h" #include "smc.h" #include "smc_netlink.h" #define SMC_MAX_CQE 32766 /* max. # of completion queue elements */ #define SMC_QP_MIN_RNR_TIMER 5 #define SMC_QP_TIMEOUT 15 /* 4096 * 2 ** timeout usec */ #define SMC_QP_RETRY_CNT 7 /* 7: infinite */ #define SMC_QP_RNR_RETRY 7 /* 7: infinite */ struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */ .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex), .list = LIST_HEAD_INIT(smc_ib_devices.list), }; u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */ static int smc_ib_modify_qp_init(struct smc_link *lnk) { struct ib_qp_attr qp_attr; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_INIT; qp_attr.pkey_index = 0; qp_attr.port_num = lnk->ibport; qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS | IB_QP_PORT); } static int smc_ib_modify_qp_rtr(struct smc_link *lnk) { enum ib_qp_attr_mask qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; struct ib_qp_attr qp_attr; u8 hop_lim = 1; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_RTR; qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu); qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport); if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway) hop_lim = IPV6_DEFAULT_HOPLIMIT; rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0); rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid); if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway) memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac, sizeof(lnk->lgr->nexthop_mac)); else memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac, sizeof(lnk->peer_mac)); qp_attr.dest_qp_num = lnk->peer_qpn; qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */ qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming * requests */ qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER; return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask); } int smc_ib_modify_qp_rts(struct smc_link *lnk) { struct ib_qp_attr qp_attr; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_RTS; qp_attr.timeout =
SMC_QP_TIMEOUT; /* local ack timeout */ qp_attr.retry_cnt = SMC_QP_RETRY_CNT; /* retry count */ qp_attr.rnr_retry = SMC_QP_RNR_RETRY; /* RNR retries, 7=infinite */ qp_attr.sq_psn = lnk->psn_initial; /* starting send packet seq # */ qp_attr.max_rd_atomic = 1; /* # of outstanding RDMA reads and * atomic ops allowed */ return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_SQ_PSN | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC); } int smc_ib_modify_qp_error(struct smc_link *lnk) { struct ib_qp_attr qp_attr; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_ERR; return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE); } int smc_ib_ready_link(struct smc_link *lnk) { struct smc_link_group *lgr = smc_get_lgr(lnk); int rc = 0; rc = smc_ib_modify_qp_init(lnk); if (rc) goto out; rc = smc_ib_modify_qp_rtr(lnk); if (rc) goto out; smc_wr_remember_qp_attr(lnk); rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv, IB_CQ_SOLICITED_MASK); if (rc) goto out; rc = smc_wr_rx_post_init(lnk); if (rc) goto out; smc_wr_remember_qp_attr(lnk); if (lgr->role == SMC_SERV) { rc = smc_ib_modify_qp_rts(lnk); if (rc) goto out; smc_wr_remember_qp_attr(lnk); } out: return rc; } static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport) { const struct ib_gid_attr *attr; int rc; attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0); if (IS_ERR(attr)) return -ENODEV; rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]); rdma_put_gid_attr(attr); return rc; } /* Create an identifier unique for this instance of SMC-R. * The MAC-address of the first active registered IB device * plus a random 2-byte number is used to create this identifier. * This name is delivered to the peer during connection initialization. */ static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev, u8 ibport) { memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], sizeof(smcibdev->mac[ibport - 1])); } bool smc_ib_is_valid_local_systemid(void) { return !is_zero_ether_addr(&local_systemid[2]); } static void smc_ib_init_local_systemid(void) { get_random_bytes(&local_systemid[0], 2); } bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) { return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; } int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr, u8 nexthop_mac[], u8 *uses_gateway) { struct neighbour *neigh = NULL; struct rtable *rt = NULL; struct flowi4 fl4 = { .saddr = saddr, .daddr = daddr }; if (daddr == cpu_to_be32(INADDR_NONE)) goto out; rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto out; if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET) goto out_rt; neigh = dst_neigh_lookup(&rt->dst, &fl4.daddr); if (!neigh) goto out_rt; memcpy(nexthop_mac, neigh->ha, ETH_ALEN); *uses_gateway = rt->rt_uses_gateway; neigh_release(neigh); ip_rt_put(rt); return 0; out_rt: ip_rt_put(rt); out: return -ENOENT; } static int smc_ib_determine_gid_rcu(const struct net_device *ndev, const struct ib_gid_attr *attr, u8 gid[], u8 *sgid_index, struct smc_init_info_smcrv2 *smcrv2) { if (!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) { if (gid) memcpy(gid, &attr->gid, SMC_GID_SIZE); if (sgid_index) *sgid_index = attr->index; return 0; } if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) { struct in_device *in_dev = __in_dev_get_rcu(ndev); struct net *net = dev_net(ndev); const struct in_ifaddr *ifa; bool subnet_match = false; if (!in_dev) goto out; 
in_dev_for_each_ifa_rcu(ifa, in_dev) { if (!inet_ifa_match(smcrv2->saddr, ifa)) continue; subnet_match = true; break; } if (!subnet_match) goto out; if (smcrv2->daddr && smc_ib_find_route(net, smcrv2->saddr, smcrv2->daddr, smcrv2->nexthop_mac, &smcrv2->uses_gateway)) goto out; if (gid) memcpy(gid, &attr->gid, SMC_GID_SIZE); if (sgid_index) *sgid_index = attr->index; return 0; } out: return -ENODEV; } /* determine the gid for an ib-device port and vlan id */ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, unsigned short vlan_id, u8 gid[], u8 *sgid_index, struct smc_init_info_smcrv2 *smcrv2) { const struct ib_gid_attr *attr; const struct net_device *ndev; int i; for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) { attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i); if (IS_ERR(attr)) continue; rcu_read_lock(); ndev = rdma_read_gid_attr_ndev_rcu(attr); if (!IS_ERR(ndev) && ((!vlan_id && !is_vlan_dev(ndev)) || (vlan_id && is_vlan_dev(ndev) && vlan_dev_vlan_id(ndev) == vlan_id))) { if (!smc_ib_determine_gid_rcu(ndev, attr, gid, sgid_index, smcrv2)) { rcu_read_unlock(); rdma_put_gid_attr(attr); return 0; } } rcu_read_unlock(); rdma_put_gid_attr(attr); } return -ENODEV; } /* check if gid is still defined on smcibdev */ static bool smc_ib_check_link_gid(u8 gid[SMC_GID_SIZE], bool smcrv2, struct smc_ib_device *smcibdev, u8 ibport) { const struct ib_gid_attr *attr; bool rc = false; int i; for (i = 0; !rc && i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) { attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i); if (IS_ERR(attr)) continue; rcu_read_lock(); if ((!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) || (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && !(ipv6_addr_type((const struct in6_addr *)&attr->gid) & IPV6_ADDR_LINKLOCAL))) if (!memcmp(gid, &attr->gid, SMC_GID_SIZE)) rc = true; rcu_read_unlock(); rdma_put_gid_attr(attr); } return rc; } /* check all links if the gid is still defined on smcibdev */ static void smc_ib_gid_check(struct smc_ib_device *smcibdev, u8 ibport) { struct smc_link_group *lgr; int i; spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry(lgr, &smc_lgr_list.list, list) { if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, SMC_MAX_PNETID_LEN)) continue; /* lgr is not affected */ if (list_empty(&lgr->list)) continue; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].state == SMC_LNK_UNUSED || lgr->lnk[i].smcibdev != smcibdev) continue; if (!smc_ib_check_link_gid(lgr->lnk[i].gid, lgr->smc_version == SMC_V2, smcibdev, ibport)) smcr_port_err(smcibdev, ibport); } } spin_unlock_bh(&smc_lgr_list.lock); } static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) { int rc; memset(&smcibdev->pattr[ibport - 1], 0, sizeof(smcibdev->pattr[ibport - 1])); rc = ib_query_port(smcibdev->ibdev, ibport, &smcibdev->pattr[ibport - 1]); if (rc) goto out; /* the SMC protocol requires specification of the RoCE MAC address */ rc = smc_ib_fill_mac(smcibdev, ibport); if (rc) goto out; if (!smc_ib_is_valid_local_systemid() && smc_ib_port_active(smcibdev, ibport)) /* create unique system identifier */ smc_ib_define_local_systemid(smcibdev, ibport); out: return rc; } /* process context wrapper for might_sleep smc_ib_remember_port_attr */ static void smc_ib_port_event_work(struct work_struct *work) { struct smc_ib_device *smcibdev = container_of( work, struct smc_ib_device, port_event_work); u8 port_idx; for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) { smc_ib_remember_port_attr(smcibdev, port_idx + 1); 
clear_bit(port_idx, &smcibdev->port_event_mask); if (!smc_ib_port_active(smcibdev, port_idx + 1)) { set_bit(port_idx, smcibdev->ports_going_away); smcr_port_err(smcibdev, port_idx + 1); } else { clear_bit(port_idx, smcibdev->ports_going_away); smcr_port_add(smcibdev, port_idx + 1); smc_ib_gid_check(smcibdev, port_idx + 1); } } } /* can be called in IRQ context */ static void smc_ib_global_event_handler(struct ib_event_handler *handler, struct ib_event *ibevent) { struct smc_ib_device *smcibdev; bool schedule = false; u8 port_idx; smcibdev = container_of(handler, struct smc_ib_device, event_handler); switch (ibevent->event) { case IB_EVENT_DEVICE_FATAL: /* terminate all ports on device */ for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) { set_bit(port_idx, &smcibdev->port_event_mask); if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) schedule = true; } if (schedule) schedule_work(&smcibdev->port_event_work); break; case IB_EVENT_PORT_ACTIVE: port_idx = ibevent->element.port_num - 1; if (port_idx >= SMC_MAX_PORTS) break; set_bit(port_idx, &smcibdev->port_event_mask); if (test_and_clear_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); break; case IB_EVENT_PORT_ERR: port_idx = ibevent->element.port_num - 1; if (port_idx >= SMC_MAX_PORTS) break; set_bit(port_idx, &smcibdev->port_event_mask); if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); break; case IB_EVENT_GID_CHANGE: port_idx = ibevent->element.port_num - 1; if (port_idx >= SMC_MAX_PORTS) break; set_bit(port_idx, &smcibdev->port_event_mask); schedule_work(&smcibdev->port_event_work); break; default: break; } } void smc_ib_dealloc_protection_domain(struct smc_link *lnk) { if (lnk->roce_pd) ib_dealloc_pd(lnk->roce_pd); lnk->roce_pd = NULL; } int smc_ib_create_protection_domain(struct smc_link *lnk) { int rc; lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); rc = PTR_ERR_OR_ZERO(lnk->roce_pd); if (IS_ERR(lnk->roce_pd)) lnk->roce_pd = NULL; return rc; } static bool smcr_diag_is_dev_critical(struct smc_lgr_list *smc_lgr, struct smc_ib_device *smcibdev) { struct smc_link_group *lgr; bool rc = false; int i; spin_lock_bh(&smc_lgr->lock); list_for_each_entry(lgr, &smc_lgr->list, list) { if (lgr->is_smcd) continue; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].state == SMC_LNK_UNUSED || lgr->lnk[i].smcibdev != smcibdev) continue; if (lgr->type == SMC_LGR_SINGLE || lgr->type == SMC_LGR_ASYMMETRIC_LOCAL) { rc = true; goto out; } } } out: spin_unlock_bh(&smc_lgr->lock); return rc; } static int smc_nl_handle_dev_port(struct sk_buff *skb, struct ib_device *ibdev, struct smc_ib_device *smcibdev, int port) { char smc_pnet[SMC_MAX_PNETID_LEN + 1]; struct nlattr *port_attrs; unsigned char port_state; int lnk_count = 0; port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT + port); if (!port_attrs) goto errout; if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcibdev->pnetid_by_user[port])) goto errattr; memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN); smc_pnet[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet)) goto errattr; if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV, smcibdev->ndev_ifidx[port])) goto errattr; if (nla_put_u8(skb, SMC_NLA_DEV_PORT_VALID, 1)) goto errattr; port_state = smc_ib_port_active(smcibdev, port + 1); if (nla_put_u8(skb, SMC_NLA_DEV_PORT_STATE, port_state)) goto errattr; lnk_count = atomic_read(&smcibdev->lnk_cnt_by_port[port]); if (nla_put_u32(skb, 
SMC_NLA_DEV_PORT_LNK_CNT, lnk_count)) goto errattr; nla_nest_end(skb, port_attrs); return 0; errattr: nla_nest_cancel(skb, port_attrs); errout: return -EMSGSIZE; } static bool smc_nl_handle_pci_values(const struct smc_pci_dev *smc_pci_dev, struct sk_buff *skb) { if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev->pci_fid)) return false; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev->pci_pchid)) return false; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev->pci_vendor)) return false; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev->pci_device)) return false; if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev->pci_id)) return false; return true; } static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev, struct sk_buff *skb, struct netlink_callback *cb) { char smc_ibname[IB_DEVICE_NAME_MAX]; struct smc_pci_dev smc_pci_dev; struct pci_dev *pci_dev; unsigned char is_crit; struct nlattr *attrs; void *nlh; int i; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_DEV_SMCR); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCR); if (!attrs) goto errout; is_crit = smcr_diag_is_dev_critical(&smc_lgr_list, smcibdev); if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, is_crit)) goto errattr; if (smcibdev->ibdev->dev.parent) { memset(&smc_pci_dev, 0, sizeof(smc_pci_dev)); pci_dev = to_pci_dev(smcibdev->ibdev->dev.parent); smc_set_pci_values(pci_dev, &smc_pci_dev); if (!smc_nl_handle_pci_values(&smc_pci_dev, skb)) goto errattr; } snprintf(smc_ibname, sizeof(smc_ibname), "%s", smcibdev->ibdev->name); if (nla_put_string(skb, SMC_NLA_DEV_IB_NAME, smc_ibname)) goto errattr; for (i = 1; i <= SMC_MAX_PORTS; i++) { if (!rdma_is_port_valid(smcibdev->ibdev, i)) continue; if (smc_nl_handle_dev_port(skb, smcibdev->ibdev, smcibdev, i - 1)) goto errattr; } nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static void smc_nl_prep_smcr_dev(struct smc_ib_devices *dev_list, struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct smc_ib_device *smcibdev; int snum = cb_ctx->pos[0]; int num = 0; mutex_lock(&dev_list->mutex); list_for_each_entry(smcibdev, &dev_list->list, list) { if (num < snum) goto next; if (smc_nl_handle_smcr_dev(smcibdev, skb, cb)) goto errout; next: num++; } errout: mutex_unlock(&dev_list->mutex); cb_ctx->pos[0] = num; } int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb) { smc_nl_prep_smcr_dev(&smc_ib_devices, skb, cb); return skb->len; } static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) { struct smc_link *lnk = (struct smc_link *)priv; struct smc_ib_device *smcibdev = lnk->smcibdev; u8 port_idx; switch (ibevent->event) { case IB_EVENT_QP_FATAL: case IB_EVENT_QP_ACCESS_ERR: port_idx = ibevent->element.qp->port - 1; if (port_idx >= SMC_MAX_PORTS) break; set_bit(port_idx, &smcibdev->port_event_mask); if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); break; default: break; } } void smc_ib_destroy_queue_pair(struct smc_link *lnk) { if (lnk->roce_qp) ib_destroy_qp(lnk->roce_qp); lnk->roce_qp = NULL; } /* create a queue pair within the protection domain for a link */ int smc_ib_create_queue_pair(struct smc_link *lnk) { int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 
2 : 1; struct ib_qp_init_attr qp_attr = { .event_handler = smc_ib_qp_event_handler, .qp_context = lnk, .send_cq = lnk->smcibdev->roce_cq_send, .recv_cq = lnk->smcibdev->roce_cq_recv, .srq = NULL, .cap = { /* include unsolicited rdma_writes as well, * there are max. 2 RDMA_WRITE per 1 WR_SEND */ .max_send_wr = SMC_WR_BUF_CNT * 3, .max_recv_wr = SMC_WR_BUF_CNT * 3, .max_send_sge = SMC_IB_MAX_SEND_SGE, .max_recv_sge = sges_per_buf, .max_inline_data = 0, }, .sq_sig_type = IB_SIGNAL_REQ_WR, .qp_type = IB_QPT_RC, }; int rc; lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr); rc = PTR_ERR_OR_ZERO(lnk->roce_qp); if (IS_ERR(lnk->roce_qp)) lnk->roce_qp = NULL; else smc_wr_remember_qp_attr(lnk); return rc; } void smc_ib_put_memory_region(struct ib_mr *mr) { ib_dereg_mr(mr); } static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx) { unsigned int offset = 0; int sg_num; /* map the largest prefix of a dma mapped SG list */ sg_num = ib_map_mr_sg(buf_slot->mr[link_idx], buf_slot->sgt[link_idx].sgl, buf_slot->sgt[link_idx].orig_nents, &offset, PAGE_SIZE); return sg_num; } /* Allocate a memory region and map the dma mapped SG list of buf_slot */ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, struct smc_buf_desc *buf_slot, u8 link_idx) { if (buf_slot->mr[link_idx]) return 0; /* already done */ buf_slot->mr[link_idx] = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order); if (IS_ERR(buf_slot->mr[link_idx])) { int rc; rc = PTR_ERR(buf_slot->mr[link_idx]); buf_slot->mr[link_idx] = NULL; return rc; } if (smc_ib_map_mr_sg(buf_slot, link_idx) != buf_slot->sgt[link_idx].orig_nents) return -EINVAL; return 0; } bool smc_ib_is_sg_need_sync(struct smc_link *lnk, struct smc_buf_desc *buf_slot) { struct scatterlist *sg; unsigned int i; bool ret = false; /* for now there is just one DMA address */ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, buf_slot->sgt[lnk->link_idx].nents, i) { if (!sg_dma_len(sg)) break; if (dma_need_sync(lnk->smcibdev->ibdev->dma_device, sg_dma_address(sg))) { ret = true; goto out; } } out: return ret; } /* synchronize buffer usage for cpu access */ void smc_ib_sync_sg_for_cpu(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction) { struct scatterlist *sg; unsigned int i; if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx))) return; /* for now there is just one DMA address */ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, buf_slot->sgt[lnk->link_idx].nents, i) { if (!sg_dma_len(sg)) break; ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev, sg_dma_address(sg), sg_dma_len(sg), data_direction); } } /* synchronize buffer usage for device access */ void smc_ib_sync_sg_for_device(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction) { struct scatterlist *sg; unsigned int i; if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx))) return; /* for now there is just one DMA address */ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, buf_slot->sgt[lnk->link_idx].nents, i) { if (!sg_dma_len(sg)) break; ib_dma_sync_single_for_device(lnk->smcibdev->ibdev, sg_dma_address(sg), sg_dma_len(sg), data_direction); } } /* Map a new TX or RX buffer SG-table to DMA */ int smc_ib_buf_map_sg(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction) { int mapped_nents; mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev, buf_slot->sgt[lnk->link_idx].sgl, buf_slot->sgt[lnk->link_idx].orig_nents, data_direction); if (!mapped_nents) return -ENOMEM; return 
mapped_nents; } void smc_ib_buf_unmap_sg(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction) { if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address) return; /* already unmapped */ ib_dma_unmap_sg(lnk->smcibdev->ibdev, buf_slot->sgt[lnk->link_idx].sgl, buf_slot->sgt[lnk->link_idx].orig_nents, data_direction); buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0; } long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) { struct ib_cq_init_attr cqattr = { .cqe = SMC_MAX_CQE, .comp_vector = 0 }; int cqe_size_order, smc_order; long rc; mutex_lock(&smcibdev->mutex); rc = 0; if (smcibdev->initialized) goto out; /* the calculated number of cq entries fits to mlx5 cq allocation */ cqe_size_order = cache_line_size() == 128 ? 7 : 6; smc_order = MAX_PAGE_ORDER - cqe_size_order; if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE) cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2; smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev, smc_wr_tx_cq_handler, NULL, smcibdev, &cqattr); rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send); if (IS_ERR(smcibdev->roce_cq_send)) { smcibdev->roce_cq_send = NULL; goto out; } smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev, smc_wr_rx_cq_handler, NULL, smcibdev, &cqattr); rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv); if (IS_ERR(smcibdev->roce_cq_recv)) { smcibdev->roce_cq_recv = NULL; goto err; } smc_wr_add_dev(smcibdev); smcibdev->initialized = 1; goto out; err: ib_destroy_cq(smcibdev->roce_cq_send); out: mutex_unlock(&smcibdev->mutex); return rc; } static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) { mutex_lock(&smcibdev->mutex); if (!smcibdev->initialized) goto out; smcibdev->initialized = 0; ib_destroy_cq(smcibdev->roce_cq_recv); ib_destroy_cq(smcibdev->roce_cq_send); smc_wr_remove_dev(smcibdev); out: mutex_unlock(&smcibdev->mutex); } static struct ib_client smc_ib_client; static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port) { struct ib_device *ibdev = smcibdev->ibdev; struct net_device *ndev; if (!ibdev->ops.get_netdev) return; ndev = ibdev->ops.get_netdev(ibdev, port + 1); if (ndev) { smcibdev->ndev_ifidx[port] = ndev->ifindex; dev_put(ndev); } } void smc_ib_ndev_change(struct net_device *ndev, unsigned long event) { struct smc_ib_device *smcibdev; struct ib_device *libdev; struct net_device *lndev; u8 port_cnt; int i; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(smcibdev, &smc_ib_devices.list, list) { port_cnt = smcibdev->ibdev->phys_port_cnt; for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) { libdev = smcibdev->ibdev; if (!libdev->ops.get_netdev) continue; lndev = libdev->ops.get_netdev(libdev, i + 1); dev_put(lndev); if (lndev != ndev) continue; if (event == NETDEV_REGISTER) smcibdev->ndev_ifidx[i] = ndev->ifindex; if (event == NETDEV_UNREGISTER) smcibdev->ndev_ifidx[i] = 0; } } mutex_unlock(&smc_ib_devices.mutex); } /* callback function for ib_register_client() */ static int smc_ib_add_dev(struct ib_device *ibdev) { struct smc_ib_device *smcibdev; u8 port_cnt; int i; if (ibdev->node_type != RDMA_NODE_IB_CA) return -EOPNOTSUPP; smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL); if (!smcibdev) return -ENOMEM; smcibdev->ibdev = ibdev; INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work); atomic_set(&smcibdev->lnk_cnt, 0); init_waitqueue_head(&smcibdev->lnks_deleted); mutex_init(&smcibdev->mutex); mutex_lock(&smc_ib_devices.mutex); list_add_tail(&smcibdev->list, &smc_ib_devices.list); mutex_unlock(&smc_ib_devices.mutex); 
ib_set_client_data(ibdev, &smc_ib_client, smcibdev); INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, smc_ib_global_event_handler); ib_register_event_handler(&smcibdev->event_handler); /* trigger reading of the port attributes */ port_cnt = smcibdev->ibdev->phys_port_cnt; pr_warn_ratelimited("smc: adding ib device %s with port count %d\n", smcibdev->ibdev->name, port_cnt); for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) { set_bit(i, &smcibdev->port_event_mask); /* determine pnetids of the port */ if (smc_pnetid_by_dev_port(ibdev->dev.parent, i, smcibdev->pnetid[i])) smc_pnetid_by_table_ib(smcibdev, i + 1); smc_copy_netdev_ifindex(smcibdev, i); pr_warn_ratelimited("smc: ib device %s port %d has pnetid " "%.16s%s\n", smcibdev->ibdev->name, i + 1, smcibdev->pnetid[i], smcibdev->pnetid_by_user[i] ? " (user defined)" : ""); } schedule_work(&smcibdev->port_event_work); return 0; } /* callback function for ib_unregister_client() */ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) { struct smc_ib_device *smcibdev = client_data; mutex_lock(&smc_ib_devices.mutex); list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ mutex_unlock(&smc_ib_devices.mutex); pr_warn_ratelimited("smc: removing ib device %s\n", smcibdev->ibdev->name); smc_smcr_terminate_all(smcibdev); smc_ib_cleanup_per_ibdev(smcibdev); ib_unregister_event_handler(&smcibdev->event_handler); cancel_work_sync(&smcibdev->port_event_work); kfree(smcibdev); } static struct ib_client smc_ib_client = { .name = "smc_ib", .add = smc_ib_add_dev, .remove = smc_ib_remove_dev, }; int __init smc_ib_register_client(void) { smc_ib_init_local_systemid(); return ib_register_client(&smc_ib_client); } void smc_ib_unregister_client(void) { ib_unregister_client(&smc_ib_client); }
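For readers unfamiliar with the ib_client interface used above: ib_register_client() causes the .add callback to run for every RDMA device already registered and for each one hot-plugged later, while .remove runs on device removal or at ib_unregister_client(). The fragment below is a minimal, hypothetical client ("demo_ib" and the demo_* names are invented) showing only that registration pattern, not anything SMC-specific.

#include <linux/module.h>
#include <rdma/ib_verbs.h>

/* Hypothetical IB client illustrating the pattern used by smc_ib_client. */
static int demo_ib_add_dev(struct ib_device *ibdev)
{
	pr_info("demo_ib: device %s added, %u port(s)\n",
		ibdev->name, (unsigned int)ibdev->phys_port_cnt);
	return 0;	/* returning an error rejects the device for this client */
}

static void demo_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	pr_info("demo_ib: device %s removed\n", ibdev->name);
}

static struct ib_client demo_ib_client = {
	.name   = "demo_ib",
	.add    = demo_ib_add_dev,
	.remove = demo_ib_remove_dev,
};

static int __init demo_ib_init(void)
{
	return ib_register_client(&demo_ib_client);
}

static void __exit demo_ib_exit(void)
{
	ib_unregister_client(&demo_ib_client);
}

module_init(demo_ib_init);
module_exit(demo_ib_exit);
MODULE_DESCRIPTION("demo ib_client sketch");
MODULE_LICENSE("GPL");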
// SPDX-License-Identifier: GPL-2.0-only /* * RDMA transport layer based on the trans_fd.c implementation.
* * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com> * Copyright (C) 2006 by Russ Cox <rsc@swtch.com> * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net> * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/in.h> #include <linux/module.h> #include <linux/net.h> #include <linux/ipv6.h> #include <linux/kthread.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/un.h> #include <linux/uaccess.h> #include <linux/inet.h> #include <linux/file.h> #include <linux/parser.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include <net/9p/transport.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #define P9_PORT 5640 #define P9_RDMA_SQ_DEPTH 32 #define P9_RDMA_RQ_DEPTH 32 #define P9_RDMA_SEND_SGE 4 #define P9_RDMA_RECV_SGE 4 #define P9_RDMA_IRD 0 #define P9_RDMA_ORD 0 #define P9_RDMA_TIMEOUT 30000 /* 30 seconds */ #define P9_RDMA_MAXSIZE (1024*1024) /* 1MB */ /** * struct p9_trans_rdma - RDMA transport instance * * @state: tracks the transport state machine for connection setup and tear down * @cm_id: The RDMA CM ID * @pd: Protection Domain pointer * @qp: Queue Pair pointer * @cq: Completion Queue pointer * @timeout: Number of uSecs to wait for connection management events * @privport: Whether a privileged port may be used * @port: The port to use * @sq_depth: The depth of the Send Queue * @sq_sem: Semaphore for the SQ * @rq_depth: The depth of the Receive Queue. * @rq_sem: Semaphore for the RQ * @excess_rc : Amount of posted Receive Contexts without a pending request. * See rdma_request() * @addr: The remote peer's address * @req_lock: Protects the active request list * @cm_done: Completion event for connection management tracking */ struct p9_trans_rdma { enum { P9_RDMA_INIT, P9_RDMA_ADDR_RESOLVED, P9_RDMA_ROUTE_RESOLVED, P9_RDMA_CONNECTED, P9_RDMA_FLUSHING, P9_RDMA_CLOSING, P9_RDMA_CLOSED, } state; struct rdma_cm_id *cm_id; struct ib_pd *pd; struct ib_qp *qp; struct ib_cq *cq; long timeout; bool privport; u16 port; int sq_depth; struct semaphore sq_sem; int rq_depth; struct semaphore rq_sem; atomic_t excess_rc; struct sockaddr_in addr; spinlock_t req_lock; struct completion cm_done; }; struct p9_rdma_req; /** * struct p9_rdma_context - Keeps track of in-process WR * * @cqe: completion queue entry * @busa: Bus address to unmap when the WR completes * @req: Keeps track of requests (send) * @rc: Keepts track of replies (receive) */ struct p9_rdma_context { struct ib_cqe cqe; dma_addr_t busa; union { struct p9_req_t *req; struct p9_fcall rc; }; }; /** * struct p9_rdma_opts - Collection of mount options * @port: port of connection * @privport: Whether a privileged port may be used * @sq_depth: The requested depth of the SQ. This really doesn't need * to be any deeper than the number of threads used in the client * @rq_depth: The depth of the RQ. 
Should be greater than or equal to SQ depth * @timeout: Time to wait in msecs for CM events */ struct p9_rdma_opts { short port; bool privport; int sq_depth; int rq_depth; long timeout; }; /* * Option Parsing (code inspired by NFS code) */ enum { /* Options that take integer arguments */ Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, /* Options that take no argument */ Opt_privport, Opt_err, }; static match_table_t tokens = { {Opt_port, "port=%u"}, {Opt_sq_depth, "sq=%u"}, {Opt_rq_depth, "rq=%u"}, {Opt_timeout, "timeout=%u"}, {Opt_privport, "privport"}, {Opt_err, NULL}, }; static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt) { struct p9_trans_rdma *rdma = clnt->trans; if (rdma->port != P9_PORT) seq_printf(m, ",port=%u", rdma->port); if (rdma->sq_depth != P9_RDMA_SQ_DEPTH) seq_printf(m, ",sq=%u", rdma->sq_depth); if (rdma->rq_depth != P9_RDMA_RQ_DEPTH) seq_printf(m, ",rq=%u", rdma->rq_depth); if (rdma->timeout != P9_RDMA_TIMEOUT) seq_printf(m, ",timeout=%lu", rdma->timeout); if (rdma->privport) seq_puts(m, ",privport"); return 0; } /** * parse_opts - parse mount options into rdma options structure * @params: options string passed from mount * @opts: rdma transport-specific structure to parse options into * * Returns 0 upon success, -ERRNO upon failure */ static int parse_opts(char *params, struct p9_rdma_opts *opts) { char *p; substring_t args[MAX_OPT_ARGS]; int option; char *options, *tmp_options; opts->port = P9_PORT; opts->sq_depth = P9_RDMA_SQ_DEPTH; opts->rq_depth = P9_RDMA_RQ_DEPTH; opts->timeout = P9_RDMA_TIMEOUT; opts->privport = false; if (!params) return 0; tmp_options = kstrdup(params, GFP_KERNEL); if (!tmp_options) { p9_debug(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; int r; if (!*p) continue; token = match_token(p, tokens, args); if ((token != Opt_err) && (token != Opt_privport)) { r = match_int(&args[0], &option); if (r < 0) { p9_debug(P9_DEBUG_ERROR, "integer field, but no integer?\n"); continue; } } switch (token) { case Opt_port: opts->port = option; break; case Opt_sq_depth: opts->sq_depth = option; break; case Opt_rq_depth: opts->rq_depth = option; break; case Opt_timeout: opts->timeout = option; break; case Opt_privport: opts->privport = true; break; default: continue; } } /* RQ must be at least as large as the SQ */ opts->rq_depth = max(opts->rq_depth, opts->sq_depth); kfree(tmp_options); return 0; } static int p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) { struct p9_client *c = id->context; struct p9_trans_rdma *rdma = c->trans; switch (event->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: BUG_ON(rdma->state != P9_RDMA_INIT); rdma->state = P9_RDMA_ADDR_RESOLVED; break; case RDMA_CM_EVENT_ROUTE_RESOLVED: BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED); rdma->state = P9_RDMA_ROUTE_RESOLVED; break; case RDMA_CM_EVENT_ESTABLISHED: BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED); rdma->state = P9_RDMA_CONNECTED; break; case RDMA_CM_EVENT_DISCONNECTED: if (rdma) rdma->state = P9_RDMA_CLOSED; c->status = Disconnected; break; case RDMA_CM_EVENT_TIMEWAIT_EXIT: break; case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_MULTICAST_JOIN: case RDMA_CM_EVENT_MULTICAST_ERROR: case RDMA_CM_EVENT_REJECTED: case RDMA_CM_EVENT_CONNECT_REQUEST: case RDMA_CM_EVENT_CONNECT_RESPONSE: case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_ADDR_ERROR: case 
RDMA_CM_EVENT_UNREACHABLE: c->status = Disconnected; rdma_disconnect(rdma->cm_id); break; default: BUG(); } complete(&rdma->cm_done); return 0; } static void recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct p9_client *client = cq->cq_context; struct p9_trans_rdma *rdma = client->trans; struct p9_rdma_context *c = container_of(wc->wr_cqe, struct p9_rdma_context, cqe); struct p9_req_t *req; int err = 0; int16_t tag; req = NULL; ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, DMA_FROM_DEVICE); if (wc->status != IB_WC_SUCCESS) goto err_out; c->rc.size = wc->byte_len; err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1); if (err) goto err_out; req = p9_tag_lookup(client, tag); if (!req) goto err_out; /* Check that we have not yet received a reply for this request. */ if (unlikely(req->rc.sdata)) { pr_err("Duplicate reply for request %d", tag); goto err_out; } req->rc.size = c->rc.size; req->rc.sdata = c->rc.sdata; p9_client_cb(client, req, REQ_STATUS_RCVD); out: up(&rdma->rq_sem); kfree(c); return; err_out: p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, wc->status); rdma->state = P9_RDMA_FLUSHING; client->status = Disconnected; goto out; } static void send_done(struct ib_cq *cq, struct ib_wc *wc) { struct p9_client *client = cq->cq_context; struct p9_trans_rdma *rdma = client->trans; struct p9_rdma_context *c = container_of(wc->wr_cqe, struct p9_rdma_context, cqe); ib_dma_unmap_single(rdma->cm_id->device, c->busa, c->req->tc.size, DMA_TO_DEVICE); up(&rdma->sq_sem); p9_req_put(client, c->req); kfree(c); } static void qp_event_handler(struct ib_event *event, void *context) { p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event, context); } static void rdma_destroy_trans(struct p9_trans_rdma *rdma) { if (!rdma) return; if (rdma->qp && !IS_ERR(rdma->qp)) ib_destroy_qp(rdma->qp); if (rdma->pd && !IS_ERR(rdma->pd)) ib_dealloc_pd(rdma->pd); if (rdma->cq && !IS_ERR(rdma->cq)) ib_free_cq(rdma->cq); if (rdma->cm_id && !IS_ERR(rdma->cm_id)) rdma_destroy_id(rdma->cm_id); kfree(rdma); } static int post_recv(struct p9_client *client, struct p9_rdma_context *c) { struct p9_trans_rdma *rdma = client->trans; struct ib_recv_wr wr; struct ib_sge sge; int ret; c->busa = ib_dma_map_single(rdma->cm_id->device, c->rc.sdata, client->msize, DMA_FROM_DEVICE); if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) goto error; c->cqe.done = recv_done; sge.addr = c->busa; sge.length = client->msize; sge.lkey = rdma->pd->local_dma_lkey; wr.next = NULL; wr.wr_cqe = &c->cqe; wr.sg_list = &sge; wr.num_sge = 1; ret = ib_post_recv(rdma->qp, &wr, NULL); if (ret) ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, DMA_FROM_DEVICE); return ret; error: p9_debug(P9_DEBUG_ERROR, "EIO\n"); return -EIO; } static int rdma_request(struct p9_client *client, struct p9_req_t *req) { struct p9_trans_rdma *rdma = client->trans; struct ib_send_wr wr; struct ib_sge sge; int err = 0; unsigned long flags; struct p9_rdma_context *c = NULL; struct p9_rdma_context *rpl_context = NULL; /* When an error occurs between posting the recv and the send, * there will be a receive context posted without a pending request. * Since there is no way to "un-post" it, we remember it and skip * post_recv() for the next request. * So here, * see if we are this `next request' and need to absorb an excess rc. * If yes, then drop and free our own, and do not recv_post(). **/ if (unlikely(atomic_read(&rdma->excess_rc) > 0)) { if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) { /* Got one! 
*/ p9_fcall_fini(&req->rc); req->rc.sdata = NULL; goto dont_need_post_recv; } else { /* We raced and lost. */ atomic_inc(&rdma->excess_rc); } } /* Allocate an fcall for the reply */ rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS); if (!rpl_context) { err = -ENOMEM; goto recv_error; } rpl_context->rc.sdata = req->rc.sdata; /* * Post a receive buffer for this request. We need to ensure * there is a reply buffer available for every outstanding * request. A flushed request can result in no reply for an * outstanding request, so we must keep a count to avoid * overflowing the RQ. */ if (down_interruptible(&rdma->rq_sem)) { err = -EINTR; goto recv_error; } err = post_recv(client, rpl_context); if (err) { p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err); goto recv_error; } /* remove posted receive buffer from request structure */ req->rc.sdata = NULL; dont_need_post_recv: /* Post the request */ c = kmalloc(sizeof *c, GFP_NOFS); if (!c) { err = -ENOMEM; goto send_error; } c->req = req; c->busa = ib_dma_map_single(rdma->cm_id->device, c->req->tc.sdata, c->req->tc.size, DMA_TO_DEVICE); if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { err = -EIO; goto send_error; } c->cqe.done = send_done; sge.addr = c->busa; sge.length = c->req->tc.size; sge.lkey = rdma->pd->local_dma_lkey; wr.next = NULL; wr.wr_cqe = &c->cqe; wr.opcode = IB_WR_SEND; wr.send_flags = IB_SEND_SIGNALED; wr.sg_list = &sge; wr.num_sge = 1; if (down_interruptible(&rdma->sq_sem)) { err = -EINTR; goto dma_unmap; } /* Mark request as `sent' *before* we actually send it, * because doing if after could erase the REQ_STATUS_RCVD * status in case of a very fast reply. */ WRITE_ONCE(req->status, REQ_STATUS_SENT); err = ib_post_send(rdma->qp, &wr, NULL); if (err) goto dma_unmap; /* Success */ return 0; dma_unmap: ib_dma_unmap_single(rdma->cm_id->device, c->busa, c->req->tc.size, DMA_TO_DEVICE); /* Handle errors that happened during or while preparing the send: */ send_error: WRITE_ONCE(req->status, REQ_STATUS_ERROR); kfree(c); p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err); /* Ach. * We did recv_post(), but not send. We have one recv_post in excess. 
*/ atomic_inc(&rdma->excess_rc); return err; /* Handle errors that happened during or while preparing post_recv(): */ recv_error: kfree(rpl_context); spin_lock_irqsave(&rdma->req_lock, flags); if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) { rdma->state = P9_RDMA_CLOSING; spin_unlock_irqrestore(&rdma->req_lock, flags); rdma_disconnect(rdma->cm_id); } else spin_unlock_irqrestore(&rdma->req_lock, flags); return err; } static void rdma_close(struct p9_client *client) { struct p9_trans_rdma *rdma; if (!client) return; rdma = client->trans; if (!rdma) return; client->status = Disconnected; rdma_disconnect(rdma->cm_id); rdma_destroy_trans(rdma); } /** * alloc_rdma - Allocate and initialize the rdma transport structure * @opts: Mount options structure */ static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts) { struct p9_trans_rdma *rdma; rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL); if (!rdma) return NULL; rdma->port = opts->port; rdma->privport = opts->privport; rdma->sq_depth = opts->sq_depth; rdma->rq_depth = opts->rq_depth; rdma->timeout = opts->timeout; spin_lock_init(&rdma->req_lock); init_completion(&rdma->cm_done); sema_init(&rdma->sq_sem, rdma->sq_depth); sema_init(&rdma->rq_sem, rdma->rq_depth); atomic_set(&rdma->excess_rc, 0); return rdma; } static int rdma_cancel(struct p9_client *client, struct p9_req_t *req) { /* Nothing to do here. * We will take care of it (if we have to) in rdma_cancelled() */ return 1; } /* A request has been fully flushed without a reply. * That means we have posted one buffer in excess. */ static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req) { struct p9_trans_rdma *rdma = client->trans; atomic_inc(&rdma->excess_rc); return 0; } static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma) { struct sockaddr_in cl = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), }; int port, err = -EINVAL; for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) { cl.sin_port = htons((ushort)port); err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl); if (err != -EADDRINUSE) break; } return err; } /** * rdma_create_trans - Transport method for creating a transport instance * @client: client instance * @addr: IP address string * @args: Mount options string */ static int rdma_create_trans(struct p9_client *client, const char *addr, char *args) { int err; struct p9_rdma_opts opts; struct p9_trans_rdma *rdma; struct rdma_conn_param conn_param; struct ib_qp_init_attr qp_attr; if (addr == NULL) return -EINVAL; /* Parse the transport specific mount options */ err = parse_opts(args, &opts); if (err < 0) return err; /* Create and initialize the RDMA transport structure */ rdma = alloc_rdma(&opts); if (!rdma) return -ENOMEM; /* Create the RDMA CM ID */ rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(rdma->cm_id)) goto error; /* Associate the client with the transport */ client->trans = rdma; /* Bind to a privileged port if we need to */ if (opts.privport) { err = p9_rdma_bind_privport(rdma); if (err < 0) { pr_err("%s (%d): problem binding to privport: %d\n", __func__, task_pid_nr(current), -err); goto error; } } /* Resolve the server's address */ rdma->addr.sin_family = AF_INET; rdma->addr.sin_addr.s_addr = in_aton(addr); rdma->addr.sin_port = htons(opts.port); err = rdma_resolve_addr(rdma->cm_id, NULL, (struct sockaddr *)&rdma->addr, rdma->timeout); if (err) goto error; err = wait_for_completion_interruptible(&rdma->cm_done); if (err || (rdma->state 
!= P9_RDMA_ADDR_RESOLVED)) goto error; /* Resolve the route to the server */ err = rdma_resolve_route(rdma->cm_id, rdma->timeout); if (err) goto error; err = wait_for_completion_interruptible(&rdma->cm_done); if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED)) goto error; /* Create the Completion Queue */ rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client, opts.sq_depth + opts.rq_depth + 1, IB_POLL_SOFTIRQ); if (IS_ERR(rdma->cq)) goto error; /* Create the Protection Domain */ rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0); if (IS_ERR(rdma->pd)) goto error; /* Create the Queue Pair */ memset(&qp_attr, 0, sizeof qp_attr); qp_attr.event_handler = qp_event_handler; qp_attr.qp_context = client; qp_attr.cap.max_send_wr = opts.sq_depth; qp_attr.cap.max_recv_wr = opts.rq_depth; qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE; qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; qp_attr.send_cq = rdma->cq; qp_attr.recv_cq = rdma->cq; err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr); if (err) goto error; rdma->qp = rdma->cm_id->qp; /* Request a connection */ memset(&conn_param, 0, sizeof(conn_param)); conn_param.private_data = NULL; conn_param.private_data_len = 0; conn_param.responder_resources = P9_RDMA_IRD; conn_param.initiator_depth = P9_RDMA_ORD; err = rdma_connect(rdma->cm_id, &conn_param); if (err) goto error; err = wait_for_completion_interruptible(&rdma->cm_done); if (err || (rdma->state != P9_RDMA_CONNECTED)) goto error; client->status = Connected; return 0; error: rdma_destroy_trans(rdma); return -ENOTCONN; } static struct p9_trans_module p9_rdma_trans = { .name = "rdma", .maxsize = P9_RDMA_MAXSIZE, .pooled_rbuffers = true, .def = 0, .owner = THIS_MODULE, .create = rdma_create_trans, .close = rdma_close, .request = rdma_request, .cancel = rdma_cancel, .cancelled = rdma_cancelled, .show_options = p9_rdma_show_options, }; /** * p9_trans_rdma_init - Register the 9P RDMA transport driver */ static int __init p9_trans_rdma_init(void) { v9fs_register_trans(&p9_rdma_trans); return 0; } static void __exit p9_trans_rdma_exit(void) { v9fs_unregister_trans(&p9_rdma_trans); } module_init(p9_trans_rdma_init); module_exit(p9_trans_rdma_exit); MODULE_ALIAS_9P("rdma"); MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>"); MODULE_DESCRIPTION("RDMA Transport for 9P"); MODULE_LICENSE("Dual BSD/GPL");
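parse_opts() above relies on the generic token parser from <linux/parser.h>: the option string is duplicated (strsep() is destructive), split on commas, and each token is matched against a match_table_t whose "%u" patterns capture integer arguments for match_int(). The sketch below shows the same pattern in isolation; the option names, defaults, and the demo_* identifiers are invented for illustration.

#include <linux/kernel.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>

enum { Opt_demo_depth, Opt_demo_debug, Opt_demo_err };

static match_table_t demo_tokens = {
	{Opt_demo_depth, "depth=%u"},
	{Opt_demo_debug, "debug"},
	{Opt_demo_err,   NULL},
};

/* Hypothetical parser mirroring the structure of parse_opts(). */
static int demo_parse(char *params, int *depth, bool *debug)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *tmp, *p;
	int option;

	*depth = 32;		/* invented default, analogous to P9_RDMA_SQ_DEPTH */
	*debug = false;
	if (!params)
		return 0;

	tmp = kstrdup(params, GFP_KERNEL);	/* strsep() modifies the string */
	if (!tmp)
		return -ENOMEM;
	options = tmp;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		switch (match_token(p, demo_tokens, args)) {
		case Opt_demo_depth:
			if (match_int(&args[0], &option) == 0)
				*depth = option;
			break;
		case Opt_demo_debug:
			*debug = true;
			break;
		default:
			break;	/* unknown options are ignored, as in parse_opts() */
		}
	}
	kfree(tmp);
	return 0;
}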
// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> static const struct drm_connector_funcs vkms_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_encoder_funcs vkms_encoder_funcs = { .destroy = drm_encoder_cleanup, }; static int vkms_conn_get_modes(struct drm_connector *connector) { int count; count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); return count; } static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { .get_modes = vkms_conn_get_modes, }; static int vkms_add_overlay_plane(struct vkms_device *vkmsdev, int index, struct drm_crtc *crtc) { struct vkms_plane *overlay; overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index); if (IS_ERR(overlay)) return PTR_ERR(overlay); if (!overlay->base.possible_crtcs) overlay->base.possible_crtcs = drm_crtc_mask(crtc); return 0; } int vkms_output_init(struct vkms_device *vkmsdev, int index) { struct vkms_output *output = &vkmsdev->output; struct drm_device *dev = &vkmsdev->drm; struct drm_connector *connector = &output->connector; struct drm_encoder *encoder = &output->encoder; struct drm_crtc *crtc = &output->crtc; struct vkms_plane *primary, *cursor = NULL; int ret; int writeback; unsigned int n; primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index); if (IS_ERR(primary)) return PTR_ERR(primary); if (vkmsdev->config->overlay) { for (n = 0; n < NUM_OVERLAY_PLANES; n++) { ret = vkms_add_overlay_plane(vkmsdev, index, crtc); if (ret) return ret; } } if (vkmsdev->config->cursor) { cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index); if (IS_ERR(cursor)) return PTR_ERR(cursor); } ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base); if (ret) return ret; ret = drm_connector_init(dev, connector, &vkms_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); if (ret) { DRM_ERROR("Failed to init connector\n"); goto err_connector; } drm_connector_helper_add(connector, &vkms_conn_helper_funcs); ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs, DRM_MODE_ENCODER_VIRTUAL, NULL); if (ret) { DRM_ERROR("Failed to init encoder\n"); goto err_encoder; } encoder->possible_crtcs = 1; ret = drm_connector_attach_encoder(connector, encoder); if (ret) { DRM_ERROR("Failed to attach connector to encoder\n"); goto err_attach; } if (vkmsdev->config->writeback) { writeback = vkms_enable_writeback_connector(vkmsdev); if (writeback) DRM_ERROR("Failed to init writeback connector\n"); } drm_mode_config_reset(dev); return 0; err_attach: drm_encoder_cleanup(encoder); err_encoder: drm_connector_cleanup(connector); err_connector: drm_crtc_cleanup(crtc); return ret; }
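A note on the possible_crtcs assignments in vkms_add_overlay_plane() and vkms_output_init(): possible_crtcs is a bitmask indexed by CRTC number, and drm_crtc_mask() is simply BIT(drm_crtc_index(crtc)). The tiny sketch below spells that out; demo_allow_crtc is an invented helper, not part of vkms.

#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>

/* Hypothetical helper: allow binding the plane to one more CRTC by OR-ing in
 * that CRTC's index bit, the same operation vkms_add_overlay_plane() performs. */
static void demo_allow_crtc(struct drm_plane *plane, struct drm_crtc *crtc)
{
	plane->possible_crtcs |= drm_crtc_mask(crtc);
}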
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_LABELS_H
#define _NF_CONNTRACK_LABELS_H

#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <uapi/linux/netfilter/xt_connlabel.h>

#define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)

struct nf_conn_labels {
	unsigned long bits[NF_CT_LABELS_MAX_SIZE / sizeof(long)];
};

/* Can't use nf_ct_ext_find(), flow dissector cannot use symbols
 * exported by nf_conntrack module.
 */
static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
	struct nf_ct_ext *ext = ct->ext;

	if (!ext || !__nf_ct_ext_exist(ext, NF_CT_EXT_LABELS))
		return NULL;

	return (void *)ct->ext + ct->ext->offset[NF_CT_EXT_LABELS];
#else
	return NULL;
#endif
}

static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
	struct net *net = nf_ct_net(ct);

	if (atomic_read(&net->ct.labels_used) == 0)
		return NULL;

	return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
#else
	return NULL;
#endif
}

int nf_connlabels_replace(struct nf_conn *ct,
			  const u32 *data, const u32 *mask,
			  unsigned int words);

#ifdef CONFIG_NF_CONNTRACK_LABELS
int nf_connlabels_get(struct net *net, unsigned int bit);
void nf_connlabels_put(struct net *net);
#else
static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
static inline void nf_connlabels_put(struct net *net) {}
#endif

#endif /* _NF_CONNTRACK_LABELS_H */
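Note (not part of the header above): the header only declares the connlabel API. As a rough sketch of how a hypothetical in-kernel user might combine these calls, the code below reserves a label bit, attaches the labels extension to a connection, and sets that bit via nf_connlabels_replace(). The example_* function names and the choice of bit 5 are invented for illustration, and error handling is abbreviated; this is not taken from any real caller.

/*
 * Illustrative sketch only, using the declarations from the header above.
 */
#include <linux/errno.h>
#include <net/netfilter/nf_conntrack_labels.h>

static int example_labels_init(struct net *net)
{
	/* Announce use of label bit 5; pairs with nf_connlabels_put(). */
	return nf_connlabels_get(net, 5);
}

static int example_tag_conn(struct nf_conn *ct)
{
	u32 bits[NF_CT_LABELS_MAX_SIZE / sizeof(u32)] = { 0 };
	u32 mask[NF_CT_LABELS_MAX_SIZE / sizeof(u32)] = { 0 };

	/* Attach the labels extension; returns NULL if labels are unused. */
	if (!nf_ct_labels_ext_add(ct))
		return -ENOSPC;

	bits[0] = 1U << 5;
	mask[0] = 1U << 5;

	/* Set bit 5 and leave every other label bit untouched;
	 * the word count is expressed in 32-bit units.
	 */
	return nf_connlabels_replace(ct, bits, mask,
				     NF_CT_LABELS_MAX_SIZE / sizeof(u32));
}

static void example_labels_exit(struct net *net)
{
	nf_connlabels_put(net);	/* release the reference from _init() */
}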
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  net/dccp/minisocks.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/timer.h>

#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>
#include <net/rstreason.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"

struct inet_timewait_death_row dccp_death_row = {
	.tw_refcount = REFCOUNT_INIT(1),
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.hashinfo = &dccp_hashinfo,
};

EXPORT_SYMBOL_GPL(dccp_death_row);

void dccp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &dccp_death_row, state);

	if (tw != NULL) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == DCCP_TIME_WAIT)
			timeo = DCCP_TIMEWAIT_LEN;

		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		DCCP_WARN("time wait bucket table overflow\n");
	}

	dccp_done(sk);
}

struct sock *dccp_create_openreq_child(const struct sock *sk,
				       const struct request_sock *req,
				       const struct sk_buff *skb)
{
	/*
	 * Step 3: Process LISTEN state
	 *
	 *   (* Generate a new socket and switch to that socket *)
	 *   Set S := new socket for this port pair
	 */
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		struct dccp_request_sock *dreq = dccp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct dccp_sock *newdp = dccp_sk(newsk);

		newdp->dccps_role = DCCP_ROLE_SERVER;
		newdp->dccps_hc_rx_ackvec = NULL;
		newdp->dccps_service_list = NULL;
		newdp->dccps_hc_rx_ccid = NULL;
		newdp->dccps_hc_tx_ccid = NULL;
		newdp->dccps_service = dreq->dreq_service;
		newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
		newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
		newicsk->icsk_rto = DCCP_TIMEOUT_INIT;

		INIT_LIST_HEAD(&newdp->dccps_featneg);
		/*
		 * Step 3: Process LISTEN state
		 *
		 *    Choose S.ISS (initial seqno) or set from Init Cookies
		 *    Initialize S.GAR := S.ISS
		 *    Set S.ISR, S.GSR from packet (or Init Cookies)
		 *
		 *    Setting AWL/AWH and SWL/SWH happens as part of the feature
		 *    activation below, as these windows all depend on the local
		 *    and remote Sequence Window feature values (7.5.2).
		 */
		newdp->dccps_iss = dreq->dreq_iss;
		newdp->dccps_gss = dreq->dreq_gss;
		newdp->dccps_gar = newdp->dccps_iss;
		newdp->dccps_isr = dreq->dreq_isr;
		newdp->dccps_gsr = dreq->dreq_gsr;

		/*
		 * Activate features: initialise CCIDs, sequence windows etc.
		 */
		if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {
			sk_free_unlock_clone(newsk);
			return NULL;
		}
		dccp_init_xmit_timers(newsk);

		__DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);

/*
 * Process an incoming packet for RESPOND sockets represented
 * as a request_sock.
 */
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
			    struct request_sock *req)
{
	struct sock *child = NULL;
	struct dccp_request_sock *dreq = dccp_rsk(req);
	bool own_req;

	/* TCP/DCCP listeners became lockless.
	 * DCCP stores complex state in its request_sock, so we need
	 * a protection for them, now this code runs without being protected
	 * by the parent (listener) lock.
	 */
	spin_lock_bh(&dreq->dreq_lock);

	/* Check for retransmitted REQUEST */
	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
		if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
			dccp_pr_debug("Retransmitted REQUEST\n");
			dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
			/*
			 * Send another RESPONSE packet
			 * To protect against Request floods, increment retrans
			 * counter (backoff, monitored by dccp_response_timer).
			 */
			inet_rtx_syn_ack(sk, req);
		}
		/* Network Duplicate, discard packet */
		goto out;
	}

	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
	    dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
		goto drop;

	/* Invalid ACK */
	if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
				dreq->dreq_iss, dreq->dreq_gss)) {
		dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
			      "dreq_iss=%llu, dreq_gss=%llu\n",
			      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      (unsigned long long) dreq->dreq_iss,
			      (unsigned long long) dreq->dreq_gss);
		goto drop;
	}

	if (dccp_parse_options(sk, dreq, skb))
		goto drop;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (child) {
		child = inet_csk_complete_hashdance(sk, child, req, own_req);
		goto out;
	}

	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
		req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);

	inet_csk_reqsk_queue_drop(sk, req);
out:
	spin_unlock_bh(&dreq->dreq_lock);
	return child;
}

EXPORT_SYMBOL_GPL(dccp_check_req);

/*
 *  Queue segment on the new socket if the new socket is active,
 *  otherwise we just shortcircuit this and continue with
 *  the new socket.
 */
int dccp_child_process(struct sock *parent, struct sock *child,
		       struct sk_buff *skb)
	__releases(child)
{
	int ret = 0;
	const int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
					     skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == DCCP_RESPOND && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);

	return ret;
}

EXPORT_SYMBOL_GPL(dccp_child_process);

void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
			 struct request_sock *rsk)
{
	DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
}

EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);

int dccp_reqsk_in