// SPDX-License-Identifier: GPL-2.0
/*
 * PCI searching functions
 *
 * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *				David Mosberger-Tang
 * Copyright (C) 1997 -- 2000 Martin Mares <mj@ucw.cz>
 * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com>
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include "pci.h"

DECLARE_RWSEM(pci_bus_sem);

/*
 * pci_for_each_dma_alias - Iterate over DMA aliases for a device
 * @pdev: starting downstream device
 * @fn: function to call for each alias
 * @data: opaque data to pass to @fn
 *
 * Starting @pdev, walk up the bus calling @fn for each possible alias
 * of @pdev at the root bus.
 */
int pci_for_each_dma_alias(struct pci_dev *pdev,
			   int (*fn)(struct pci_dev *pdev,
				     u16 alias, void *data), void *data)
{
	struct pci_bus *bus;
	int ret;

	/*
	 * The device may have an explicit alias requester ID for DMA where the
	 * requester is on another PCI bus.
	 */
	pdev = pci_real_dma_dev(pdev);
	ret = fn(pdev, pci_dev_id(pdev), data);
	if (ret)
		return ret;

	/*
	 * If the device is broken and uses an alias requester ID for
	 * DMA, iterate over that too.
	 */
	if (unlikely(pdev->dma_alias_mask)) {
		unsigned int devfn;

		for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS) {
			ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
				 data);
			if (ret)
				return ret;
		}
	}

	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		struct pci_dev *tmp;

		/* Skip virtual buses */
		if (!bus->self)
			continue;

		tmp = bus->self;

		/* stop at bridge where translation unit is associated */
		if (tmp->dev_flags & PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT)
			return ret;

		/*
		 * PCIe-to-PCI/X bridges alias transactions from downstream
		 * devices using the subordinate bus number (PCI Express to
		 * PCI/PCI-X Bridge Spec, rev 1.0, sec 2.3).  For all cases
		 * where the upstream bus is PCI/X we alias to the bridge
		 * (there are various conditions in the previous reference
		 * where the bridge may take ownership of transactions, even
		 * when the secondary interface is PCI-X).
		 */
		if (pci_is_pcie(tmp)) {
			switch (pci_pcie_type(tmp)) {
			case PCI_EXP_TYPE_ROOT_PORT:
			case PCI_EXP_TYPE_UPSTREAM:
			case PCI_EXP_TYPE_DOWNSTREAM:
				continue;
			case PCI_EXP_TYPE_PCI_BRIDGE:
				ret = fn(tmp,
					 PCI_DEVID(tmp->subordinate->number,
						   PCI_DEVFN(0, 0)), data);
				if (ret)
					return ret;
				continue;
			case PCI_EXP_TYPE_PCIE_BRIDGE:
				ret = fn(tmp, pci_dev_id(tmp), data);
				if (ret)
					return ret;
				continue;
			}
		} else {
			if (tmp->dev_flags & PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS)
				ret = fn(tmp,
					 PCI_DEVID(tmp->subordinate->number,
						   PCI_DEVFN(0, 0)), data);
			else
				ret = fn(tmp, pci_dev_id(tmp), data);
			if (ret)
				return ret;
		}
	}

	return ret;
}
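/*
 * Illustrative only: a minimal pci_for_each_dma_alias() callback. The
 * signature matches @fn above; report_alias and its dev_info() output
 * are hypothetical.
 */
#if 0	/* example */
static int report_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	/* Print each requester ID the walk visits for this device */
	dev_info(&pdev->dev, "DMA alias %02x:%02x.%d\n",
		 PCI_BUS_NUM(alias), PCI_SLOT(alias & 0xff),
		 PCI_FUNC(alias & 0xff));
	return 0;	/* returning non-zero stops the walk */
}

	/* ... pci_for_each_dma_alias(pdev, report_alias, NULL); */
#endif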
static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
{
	struct pci_bus *child;
	struct pci_bus *tmp;

	if (bus->number == busnr)
		return bus;

	list_for_each_entry(tmp, &bus->children, node) {
		child = pci_do_find_bus(tmp, busnr);
		if (child)
			return child;
	}
	return NULL;
}

/**
 * pci_find_bus - locate PCI bus from a given domain and bus number
 * @domain: number of PCI domain to search
 * @busnr: number of desired PCI bus
 *
 * Given a PCI bus number and domain number, the desired PCI bus is located
 * in the global list of PCI buses.  If the bus is found, a pointer to its
 * data structure is returned.  If no bus is found, %NULL is returned.
 */
struct pci_bus *pci_find_bus(int domain, int busnr)
{
	struct pci_bus *bus = NULL;
	struct pci_bus *tmp_bus;

	while ((bus = pci_find_next_bus(bus)) != NULL)  {
		if (pci_domain_nr(bus) != domain)
			continue;
		tmp_bus = pci_do_find_bus(bus, busnr);
		if (tmp_bus)
			return tmp_bus;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_bus);

/**
 * pci_find_next_bus - begin or continue searching for a PCI bus
 * @from: Previous PCI bus found, or %NULL for new search.
 *
 * Iterates through the list of known PCI buses.  A new search is
 * initiated by passing %NULL as the @from argument.  Otherwise if
 * @from is not %NULL, searches continue from next device on the
 * global list.
 */
struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
{
	struct list_head *n;
	struct pci_bus *b = NULL;

	down_read(&pci_bus_sem);
	n = from ? from->node.next : pci_root_buses.next;
	if (n != &pci_root_buses)
		b = list_entry(n, struct pci_bus, node);
	up_read(&pci_bus_sem);
	return b;
}
EXPORT_SYMBOL(pci_find_next_bus);
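/*
 * Illustrative only: scanning the root buses with pci_find_next_bus().
 * Passing NULL starts the scan; each call resumes after the bus
 * returned previously.
 */
#if 0	/* example */
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		printk(KERN_DEBUG "root bus %04x:%02x\n",
		       pci_domain_nr(bus), bus->number);
#endif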
/**
 * pci_get_slot - locate PCI device for a given PCI slot
 * @bus: PCI bus on which desired PCI device resides
 * @devfn: encodes number of PCI slot in which the desired PCI
 * device resides and the logical device number within that slot
 * in case of multi-function devices.
 *
 * Given a PCI bus and slot/function number, the desired PCI device
 * is located in the list of PCI devices.
 * If the device is found, its reference count is increased and this
 * function returns a pointer to its data structure.  The caller must
 * decrement the reference count by calling pci_dev_put().
 * If no device is found, %NULL is returned.
 */
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_dev *dev;

	down_read(&pci_bus_sem);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->devfn == devfn)
			goto out;
	}

	dev = NULL;
 out:
	pci_dev_get(dev);
	up_read(&pci_bus_sem);
	return dev;
}
EXPORT_SYMBOL(pci_get_slot);

/**
 * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
 * @domain: PCI domain/segment on which the PCI device resides.
 * @bus: PCI bus on which desired PCI device resides
 * @devfn: encodes number of PCI slot in which the desired PCI device
 * resides and the logical device number within that slot in case of
 * multi-function devices.
 *
 * Given a PCI domain, bus, and slot/function number, the desired PCI
 * device is located in the list of PCI devices.  If the device is
 * found, its reference count is increased and this function returns a
 * pointer to its data structure.  The caller must decrement the
 * reference count by calling pci_dev_put().  If no device is found,
 * %NULL is returned.
 */
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
					    unsigned int devfn)
{
	struct pci_dev *dev = NULL;

	for_each_pci_dev(dev) {
		if (pci_domain_nr(dev->bus) == domain &&
		    (dev->bus->number == bus && dev->devfn == devfn))
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_get_domain_bus_and_slot);

static int match_pci_dev_by_id(struct device *dev, const void *data)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	const struct pci_device_id *id = data;

	if (pci_match_one_device(id, pdev))
		return 1;
	return 0;
}

/*
 * pci_get_dev_by_id - begin or continue searching for a PCI device by id
 * @id: pointer to struct pci_device_id to match for the device
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is found
 * with a matching id a pointer to its device structure is returned, and the
 * reference count to the device is incremented.  Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.  Otherwise
 * if @from is not %NULL, searches continue from next device on the global
 * list.  The reference count for @from is always decremented if it is not
 * %NULL.
 *
 * This is an internal function for use by the other search functions in
 * this file.
 */
static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
					 struct pci_dev *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct pci_dev *pdev = NULL;

	if (from)
		dev_start = &from->dev;
	dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
			      match_pci_dev_by_id);
	if (dev)
		pdev = to_pci_dev(dev);
	pci_dev_put(from);
	return pdev;
}
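/*
 * Illustrative only: looking up a single function by geographic address
 * and balancing the reference. The 0000:03:00.0 address is made up.
 */
#if 0	/* example */
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(0, 0x03, PCI_DEVFN(0, 0));
	if (pdev) {
		/* ... use pdev ... */
		pci_dev_put(pdev);	/* drop the reference taken above */
	}
#endif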
/**
 * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
 * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is found
 * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its
 * device structure is returned, and the reference count to the device is
 * incremented.  Otherwise, %NULL is returned.  A new search is initiated by
 * passing %NULL as the @from argument.  Otherwise if @from is not %NULL,
 * searches continue from next device on the global list.
 * The reference count for @from is always decremented if it is not %NULL.
 */
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
			       unsigned int ss_vendor, unsigned int ss_device,
			       struct pci_dev *from)
{
	struct pci_device_id id = {
		.vendor = vendor,
		.device = device,
		.subvendor = ss_vendor,
		.subdevice = ss_device,
	};

	return pci_get_dev_by_id(&id, from);
}
EXPORT_SYMBOL(pci_get_subsys);

/**
 * pci_get_device - begin or continue searching for a PCI device by vendor/device id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @vendor and @device, the reference count to the
 * device is incremented and a pointer to its device structure is returned.
 * Otherwise, %NULL is returned.  A new search is initiated by passing %NULL
 * as the @from argument.  Otherwise if @from is not %NULL, searches continue
 * from next device on the global list.  The reference count for @from is
 * always decremented if it is not %NULL.
 */
struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
			       struct pci_dev *from)
{
	return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
}
EXPORT_SYMBOL(pci_get_device);

/**
 * pci_get_class - begin or continue searching for a PCI device by class
 * @class: search for a PCI device with this class designation
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @class, the reference count to the device is
 * incremented and a pointer to its device structure is returned.
 * Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise if @from is not %NULL, searches continue from next device
 * on the global list.  The reference count for @from is always decremented
 * if it is not %NULL.
 */
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
{
	struct pci_device_id id = {
		.vendor = PCI_ANY_ID,
		.device = PCI_ANY_ID,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class_mask = PCI_ANY_ID,
		.class = class,
	};

	return pci_get_dev_by_id(&id, from);
}
EXPORT_SYMBOL(pci_get_class);

/**
 * pci_get_base_class - searching for a PCI device by matching against the base class code only
 * @class: search for a PCI device with this base class code
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is found
 * with a matching base class code, the reference count to the device is
 * incremented.  See pci_match_one_device() to figure out how this works.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise if @from is not %NULL, searches continue from next device on the
 * global list.  The reference count for @from is always decremented if it is
 * not %NULL.
 *
 * Returns:
 * A pointer to a matched PCI device, %NULL otherwise.
 */
struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from)
{
	struct pci_device_id id = {
		.vendor = PCI_ANY_ID,
		.device = PCI_ANY_ID,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class_mask = 0xFF0000,
		.class = class << 16,
	};

	return pci_get_dev_by_id(&id, from);
}
EXPORT_SYMBOL(pci_get_base_class);
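/*
 * Illustrative only: enumerating all devices of one vendor. Because
 * pci_get_device() always drops the reference on @from and takes one
 * on the device it returns, a plain loop leaks nothing; only an early
 * break keeps a reference the caller must later pci_dev_put(). The
 * 0x8086 vendor id and want_this_one() are placeholders.
 */
#if 0	/* example */
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(0x8086, PCI_ANY_ID, pdev)) != NULL) {
		if (want_this_one(pdev))
			break;	/* keep the reference; put it when done */
	}
#endif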
/**
 * pci_dev_present - Returns 1 if device matching the device list is present, 0 if not.
 * @ids: A pointer to a null terminated list of struct pci_device_id structures
 * that describe the type of PCI device the caller is trying to find.
 *
 * Obvious fact: You do not have a reference to any device that might be found
 * by this function, so if that device is removed from the system right after
 * this function is finished, the value will be stale.  Use this function to
 * find devices that are usually built into a system, or for a general hint as
 * to if another device happens to be present at this specific moment in time.
 */
int pci_dev_present(const struct pci_device_id *ids)
{
	struct pci_dev *found = NULL;

	while (ids->vendor || ids->subvendor || ids->class_mask) {
		found = pci_get_dev_by_id(ids, NULL);
		if (found) {
			pci_dev_put(found);
			return 1;
		}
		ids++;
	}

	return 0;
}
EXPORT_SYMBOL(pci_dev_present);
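/*
 * Illustrative only: probing for "any of these chips" with
 * pci_dev_present(). The table must end with a zeroed terminator;
 * the ids are placeholders.
 */
#if 0	/* example */
	static const struct pci_device_id quirk_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },
		{ PCI_DEVICE(0x1234, 0x9abc) },
		{ }	/* terminator */
	};

	if (pci_dev_present(quirk_ids))
		pr_info("quirk hardware present\n");
#endif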
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Auvitek USB bridge
 *
 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
 */

#include "au0828.h"
#include "au8522.h"

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <linux/mutex.h>

/* Due to enum tuner_pad_index */
#include <media/tuner.h>

/*
 * 1 = General debug messages
 * 2 = USB handling
 * 4 = I2C related
 * 8 = Bridge related
 * 16 = IR related
 */
int au0828_debug;
module_param_named(debug, au0828_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "set debug bitmask: 1=general, 2=USB, 4=I2C, 8=bridge, 16=IR");

static unsigned int disable_usb_speed_check;
module_param(disable_usb_speed_check, int, 0444);
MODULE_PARM_DESC(disable_usb_speed_check,
		 "override min bandwidth requirement of 480M bps");

#define _AU0828_BULKPIPE 0x03
#define _BULKPIPESIZE 0xffff

static int send_control_msg(struct au0828_dev *dev, u16 request, u32 value,
			    u16 index);
static int recv_control_msg(struct au0828_dev *dev, u16 request, u32 value,
			    u16 index, unsigned char *cp, u16 size);

/* USB Direction */
#define CMD_REQUEST_IN		0x00
#define CMD_REQUEST_OUT		0x01
u32 au0828_readreg(struct au0828_dev *dev, u16 reg)
{
	u8 result = 0;

	recv_control_msg(dev, CMD_REQUEST_IN, 0, reg, &result, 1);
	dprintk(8, "%s(0x%04x) = 0x%02x\n", __func__, reg, result);

	return result;
}

u32 au0828_writereg(struct au0828_dev *dev, u16 reg, u32 val)
{
	dprintk(8, "%s(0x%04x, 0x%02x)\n", __func__, reg, val);
	return send_control_msg(dev, CMD_REQUEST_OUT, val, reg);
}

static int send_control_msg(struct au0828_dev *dev, u16 request, u32 value,
			    u16 index)
{
	int status = -ENODEV;

	if (dev->usbdev) {
		/* cp must be memory that has been allocated by kmalloc */
		status = usb_control_msg(dev->usbdev,
					 usb_sndctrlpipe(dev->usbdev, 0),
					 request,
					 USB_DIR_OUT | USB_TYPE_VENDOR |
						 USB_RECIP_DEVICE,
					 value, index, NULL, 0, 1000);

		status = min(status, 0);

		if (status < 0) {
			pr_err("%s() Failed sending control message, error %d.\n",
			       __func__, status);
		}
	}

	return status;
}

static int recv_control_msg(struct au0828_dev *dev, u16 request, u32 value,
			    u16 index, unsigned char *cp, u16 size)
{
	int status = -ENODEV;

	mutex_lock(&dev->mutex);
	if (dev->usbdev) {
		status = usb_control_msg(dev->usbdev,
					 usb_rcvctrlpipe(dev->usbdev, 0),
					 request,
					 USB_DIR_IN | USB_TYPE_VENDOR |
						 USB_RECIP_DEVICE,
					 value, index,
					 dev->ctrlmsg, size, 1000);

		status = min(status, 0);

		if (status < 0) {
			pr_err("%s() Failed receiving control message, error %d.\n",
			       __func__, status);
		}

		/* the host controller requires heap allocated memory, which
		   is why we didn't just pass "cp" into usb_control_msg */
		memcpy(cp, dev->ctrlmsg, size);
	}
	mutex_unlock(&dev->mutex);
	return status;
}
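/*
 * Illustrative only: how the register helpers above are typically used.
 * The 0x600 offset mirrors the REG_600 power-up write in
 * au0828_usb_probe() below (REG_600 itself is defined in au0828.h), so
 * treat the constant here as a placeholder.
 */
#if 0	/* example */
	u32 val;

	au0828_writereg(dev, 0x600, 1 << 4);	/* power up the bridge */
	val = au0828_readreg(dev, 0x600);	/* read it back */

	/*
	 * Note: recv_control_msg() bounces the data through dev->ctrlmsg
	 * because usb_control_msg() needs a kmalloc'ed, DMA-able buffer,
	 * not a pointer into a caller's stack frame.
	 */
#endif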
#ifdef CONFIG_MEDIA_CONTROLLER
static void au0828_media_graph_notify(struct media_entity *new,
				      void *notify_data);
#endif

static void au0828_unregister_media_device(struct au0828_dev *dev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_device *mdev = dev->media_dev;
	struct media_entity_notify *notify, *nextp;

	if (!mdev || !media_devnode_is_registered(mdev->devnode))
		return;

	/* Remove au0828 entity_notify callbacks */
	list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list) {
		if (notify->notify != au0828_media_graph_notify)
			continue;
		media_device_unregister_entity_notify(mdev, notify);
	}

	/* clear enable_source, disable_source */
	mutex_lock(&mdev->graph_mutex);
	dev->media_dev->source_priv = NULL;
	dev->media_dev->enable_source = NULL;
	dev->media_dev->disable_source = NULL;
	mutex_unlock(&mdev->graph_mutex);

	media_device_delete(dev->media_dev, KBUILD_MODNAME, THIS_MODULE);
	dev->media_dev = NULL;
#endif
}

void au0828_usb_release(struct au0828_dev *dev)
{
	au0828_unregister_media_device(dev);

	/* I2C */
	au0828_i2c_unregister(dev);

	kfree(dev);
}

static void au0828_usb_disconnect(struct usb_interface *interface)
{
	struct au0828_dev *dev = usb_get_intfdata(interface);

	dprintk(1, "%s()\n", __func__);

	/*
	 * There is a small window after disconnect, before dev->usbdev
	 * becomes NULL, in which poll routines (e.g. IR) may try to access
	 * the device and fill dmesg with error messages. Set the status
	 * so poll routines can check and avoid access after disconnect.
	 */
	set_bit(DEV_DISCONNECTED, &dev->dev_state);

	au0828_rc_unregister(dev);
	/* Digital TV */
	au0828_dvb_unregister(dev);

	usb_set_intfdata(interface, NULL);
	mutex_lock(&dev->mutex);
	dev->usbdev = NULL;
	mutex_unlock(&dev->mutex);
	if (au0828_analog_unregister(dev)) {
		/*
		 * No need to call au0828_usb_release() if V4L2 is enabled,
		 * as this is already called via au0828_usb_v4l2_release()
		 */
		return;
	}
	au0828_usb_release(dev);
}

static int au0828_media_device_init(struct au0828_dev *dev,
				    struct usb_device *udev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_device *mdev;

	mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	dev->media_dev = mdev;
#endif
	return 0;
}

#ifdef CONFIG_MEDIA_CONTROLLER
static void au0828_media_graph_notify(struct media_entity *new,
				      void *notify_data)
{
	struct au0828_dev *dev = notify_data;
	int ret;
	struct media_entity *entity, *mixer = NULL, *decoder = NULL;

	if (!new) {
		/*
		 * Called during au0828 probe time to connect
		 * entities that were created prior to registering
		 * the notify handler. Find mixer and decoder.
		 */
		media_device_for_each_entity(entity, dev->media_dev) {
			if (entity->function == MEDIA_ENT_F_AUDIO_MIXER)
				mixer = entity;
			else if (entity->function == MEDIA_ENT_F_ATV_DECODER)
				decoder = entity;
		}
		goto create_link;
	}

	switch (new->function) {
	case MEDIA_ENT_F_AUDIO_MIXER:
		mixer = new;
		if (dev->decoder)
			decoder = dev->decoder;
		break;
	case MEDIA_ENT_F_ATV_DECODER:
		/* In case, Mixer is added first, find mixer and create link */
		media_device_for_each_entity(entity, dev->media_dev) {
			if (entity->function == MEDIA_ENT_F_AUDIO_MIXER)
				mixer = entity;
		}
		decoder = new;
		break;
	default:
		break;
	}

create_link:
	if (decoder && mixer) {
		ret = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
					  PAD_SIGNAL_AUDIO);
		if (ret >= 0)
			ret = media_create_pad_link(decoder, ret,
						    mixer, 0,
						    MEDIA_LNK_FL_ENABLED);
		if (ret < 0)
			dev_err(&dev->usbdev->dev,
				"Mixer Pad Link Create Error: %d\n", ret);
	}
}

static bool au0828_is_link_shareable(struct media_entity *owner,
				     struct media_entity *entity)
{
	bool shareable = false;

	/* Tuner link can be shared by audio, video, and VBI */
	switch (owner->function) {
	case MEDIA_ENT_F_IO_V4L:
	case MEDIA_ENT_F_AUDIO_CAPTURE:
	case MEDIA_ENT_F_IO_VBI:
		if (entity->function == MEDIA_ENT_F_IO_V4L ||
		    entity->function == MEDIA_ENT_F_AUDIO_CAPTURE ||
		    entity->function == MEDIA_ENT_F_IO_VBI)
			shareable = true;
		break;
	case MEDIA_ENT_F_DTV_DEMOD:
	default:
		break;
	}

	return shareable;
}
/* Callers should hold graph_mutex */
static int au0828_enable_source(struct media_entity *entity,
				struct media_pipeline *pipe)
{
	struct media_entity *source, *find_source;
	struct media_entity *sink;
	struct media_link *link, *found_link = NULL;
	int ret = 0;
	struct media_device *mdev = entity->graph_obj.mdev;
	struct au0828_dev *dev;

	if (!mdev)
		return -ENODEV;

	dev = mdev->source_priv;

	/*
	 * For Audio and V4L2 entity, find the link to which decoder
	 * is the sink. Look for an active link between decoder and
	 * source (tuner/s-video/Composite), if one exists, nothing
	 * to do. If not, look for any active links between source
	 * and any other entity. If one exists, source is busy. If
	 * source is free, setup link and start pipeline from source.
	 * For DVB FE entity, the source for the link is the tuner.
	 * Check if tuner is available and setup link and start
	 * pipeline.
	 */
	if (entity->function == MEDIA_ENT_F_DTV_DEMOD) {
		sink = entity;
		find_source = dev->tuner;
	} else {
		/* Analog isn't configured or register failed */
		if (!dev->decoder) {
			ret = -ENODEV;
			goto end;
		}

		sink = dev->decoder;

		/*
		 * Default input is tuner and default input_type
		 * is AU0828_VMUX_TELEVISION.
		 *
		 * There is a problem when s_input is called to
		 * change the default input. s_input will try to
		 * enable_source before attempting to change the
		 * input on the device, and will end up enabling
		 * default source which is tuner.
		 *
		 * Additional logic is necessary in au0828 to detect
		 * that the input has changed and enable the right
		 * source. au0828 handles this case in its s_input.
		 * It will disable the old source and enable the new
		 * source.
		 */
		if (dev->input_type == AU0828_VMUX_TELEVISION)
			find_source = dev->tuner;
		else if (dev->input_type == AU0828_VMUX_SVIDEO ||
			 dev->input_type == AU0828_VMUX_COMPOSITE)
			find_source = &dev->input_ent[dev->input_type];
		else {
			/* unknown input - let user select input */
			ret = 0;
			goto end;
		}
	}

	/* Is there an active link between sink and source */
	if (dev->active_link) {
		if (dev->active_link_owner == entity) {
			/* This check is necessary to handle multiple
			 * enable_source calls from v4l_ioctls during
			 * the course of video/vbi application run-time.
			 */
			pr_debug("%s already owns the tuner\n", entity->name);
			ret = 0;
			goto end;
		} else if (au0828_is_link_shareable(dev->active_link_owner,
						    entity)) {
			/* Either ALSA or Video own tuner. Sink is the same
			 * for both. Allow sharing the active link between
			 * their common source (tuner) and sink (decoder).
			 * Starting pipeline between sharing entity and sink
			 * will fail with pipe mismatch, while owner has an
			 * active pipeline. Switch pipeline ownership from
			 * user to owner when owner disables the source.
			 */
			dev->active_link_shared = true;
			/* save the user info to use from disable */
			dev->active_link_user = entity;
			dev->active_link_user_pipe = pipe;
			pr_debug("%s owns the tuner %s can share!\n",
				 dev->active_link_owner->name,
				 entity->name);
			ret = 0;
			goto end;
		} else {
			ret = -EBUSY;
			goto end;
		}
	}

	list_for_each_entry(link, &sink->links, list) {
		/* Check sink, and source */
		if (link->sink->entity == sink &&
		    link->source->entity == find_source) {
			found_link = link;
			break;
		}
	}

	if (!found_link) {
		ret = -ENODEV;
		goto end;
	}

	/* activate link between source and sink and start pipeline */
	source = found_link->source->entity;
	ret = __media_entity_setup_link(found_link, MEDIA_LNK_FL_ENABLED);
	if (ret) {
		pr_err("Activate link from %s->%s. Error %d\n",
		       source->name, sink->name, ret);
		goto end;
	}

	ret = __media_pipeline_start(entity->pads, pipe);
	if (ret) {
		pr_err("Start Pipeline: %s->%s Error %d\n",
		       source->name, entity->name, ret);
		ret = __media_entity_setup_link(found_link, 0);
		if (ret)
			pr_err("Deactivate link Error %d\n", ret);
		goto end;
	}

	/* save link state to allow audio and video share the link
	 * and not disable the link while the other is using it.
	 * active_link_owner is used to deactivate the link.
	 */
	dev->active_link = found_link;
	dev->active_link_owner = entity;
	dev->active_source = source;
	dev->active_sink = sink;

	pr_info("Enabled Source: %s->%s->%s Ret %d\n",
		dev->active_source->name, dev->active_sink->name,
		dev->active_link_owner->name, ret);
end:
	pr_debug("%s end: ent:%s fnc:%d ret %d\n",
		 __func__, entity->name, entity->function, ret);
	return ret;
}
/* Callers should hold graph_mutex */
static void au0828_disable_source(struct media_entity *entity)
{
	int ret = 0;
	struct media_device *mdev = entity->graph_obj.mdev;
	struct au0828_dev *dev;

	if (!mdev)
		return;

	dev = mdev->source_priv;

	if (!dev->active_link)
		return;

	/* link is active - stop pipeline from source
	 * (tuner/s-video/Composite) to the entity
	 * When DVB/s-video/Composite owns tuner, it won't be in
	 * shared state.
	 */
	if (dev->active_link->sink->entity == dev->active_sink &&
	    dev->active_link->source->entity == dev->active_source) {
		/*
		 * Prevent video from deactivating link when audio
		 * has active pipeline and vice versa. In addition
		 * handle the case when more than one video/vbi
		 * application is sharing the link.
		 */
		bool owner_is_audio = false;

		if (dev->active_link_owner->function ==
		    MEDIA_ENT_F_AUDIO_CAPTURE)
			owner_is_audio = true;

		if (dev->active_link_shared) {
			pr_debug("Shared link owner %s user %s %d\n",
				 dev->active_link_owner->name,
				 entity->name, dev->users);

			/* Handle video device users > 1
			 * When audio owns the shared link with
			 * more than one video users, avoid
			 * disabling the source and/or switching
			 * the owner until the last disable_source
			 * call from video _close(). Use dev->users to
			 * determine when to switch/disable.
			 */
			if (dev->active_link_owner != entity) {
				/* video device has users > 1 */
				if (owner_is_audio && dev->users > 1)
					return;

				dev->active_link_user = NULL;
				dev->active_link_user_pipe = NULL;
				dev->active_link_shared = false;
				return;
			}

			/* video owns the link and has users > 1 */
			if (!owner_is_audio && dev->users > 1)
				return;

			/* stop pipeline */
			__media_pipeline_stop(dev->active_link_owner->pads);
			pr_debug("Pipeline stop for %s\n",
				 dev->active_link_owner->name);

			ret = __media_pipeline_start(
					dev->active_link_user->pads,
					dev->active_link_user_pipe);
			if (ret) {
				pr_err("Start Pipeline: %s->%s %d\n",
				       dev->active_source->name,
				       dev->active_link_user->name,
				       ret);
				goto deactivate_link;
			}
			/* link user is now the owner */
			dev->active_link_owner = dev->active_link_user;
			dev->active_link_user = NULL;
			dev->active_link_user_pipe = NULL;
			dev->active_link_shared = false;

			pr_debug("Pipeline started for %s\n",
				 dev->active_link_owner->name);
			return;
		} else if (!owner_is_audio && dev->users > 1)
			/* video/vbi owns the link and has users > 1 */
			return;

		if (dev->active_link_owner != entity)
			return;

		/* stop pipeline */
		__media_pipeline_stop(dev->active_link_owner->pads);
		pr_debug("Pipeline stop for %s\n",
			 dev->active_link_owner->name);

deactivate_link:
		ret = __media_entity_setup_link(dev->active_link, 0);
		if (ret)
			pr_err("Deactivate link Error %d\n", ret);

		pr_info("Disabled Source: %s->%s->%s Ret %d\n",
			dev->active_source->name, dev->active_sink->name,
			dev->active_link_owner->name, ret);

		dev->active_link = NULL;
		dev->active_link_owner = NULL;
		dev->active_source = NULL;
		dev->active_sink = NULL;
		dev->active_link_shared = false;
		dev->active_link_user = NULL;
	}
}
#endif

static int au0828_media_device_register(struct au0828_dev *dev,
					struct usb_device *udev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
	int ret;
	struct media_entity *entity, *demod = NULL;
	struct media_link *link;

	if (!dev->media_dev)
		return 0;

	if (!media_devnode_is_registered(dev->media_dev->devnode)) {

		/* register media device */
		ret = media_device_register(dev->media_dev);
		if (ret) {
			media_device_delete(dev->media_dev, KBUILD_MODNAME,
					    THIS_MODULE);
			dev->media_dev = NULL;
			dev_err(&udev->dev,
				"Media Device Register Error: %d\n", ret);
			return ret;
		}
	} else {
		/*
		 * Call au0828_media_graph_notify() to connect
		 * audio graph to our graph. In this case, audio
		 * driver registered the device and there is no
		 * entity_notify to be called when new entities
		 * are added. Invoke it now.
		 */
		au0828_media_graph_notify(NULL, (void *) dev);
	}

	/*
	 * Find tuner, decoder and demod.
	 *
	 * The tuner and decoder should be cached, as they'll be used by
	 * au0828_enable_source.
	 *
	 * It also needs to disable the link between tuner and
	 * decoder/demod, to avoid disable step when tuner is requested
	 * by video or audio. Note that this step can't be done until dvb
	 * graph is created during dvb register.
	 */
	media_device_for_each_entity(entity, dev->media_dev) {
		switch (entity->function) {
		case MEDIA_ENT_F_TUNER:
			dev->tuner = entity;
			break;
		case MEDIA_ENT_F_ATV_DECODER:
			dev->decoder = entity;
			break;
		case MEDIA_ENT_F_DTV_DEMOD:
			demod = entity;
			break;
		}
	}

	/* Disable link between tuner->demod and/or tuner->decoder */
	if (dev->tuner) {
		list_for_each_entry(link, &dev->tuner->links, list) {
			if (demod && link->sink->entity == demod)
				media_entity_setup_link(link, 0);
			if (dev->decoder && link->sink->entity == dev->decoder)
				media_entity_setup_link(link, 0);
		}
	}

	/* register entity_notify callback */
	dev->entity_notify.notify_data = (void *) dev;
	dev->entity_notify.notify = (void *) au0828_media_graph_notify;
	media_device_register_entity_notify(dev->media_dev,
					    &dev->entity_notify);

	/* set enable_source */
	mutex_lock(&dev->media_dev->graph_mutex);
	dev->media_dev->source_priv = (void *) dev;
	dev->media_dev->enable_source = au0828_enable_source;
	dev->media_dev->disable_source = au0828_disable_source;
	mutex_unlock(&dev->media_dev->graph_mutex);
#endif
	return 0;
}
static int au0828_usb_probe(struct usb_interface *interface,
			    const struct usb_device_id *id)
{
	int ifnum;
	int retval = 0;

	struct au0828_dev *dev;
	struct usb_device *usbdev = interface_to_usbdev(interface);

	ifnum = interface->altsetting->desc.bInterfaceNumber;

	if (ifnum != 0)
		return -ENODEV;

	dprintk(1, "%s() vendor id 0x%x device id 0x%x ifnum:%d\n", __func__,
		le16_to_cpu(usbdev->descriptor.idVendor),
		le16_to_cpu(usbdev->descriptor.idProduct),
		ifnum);

	/*
	 * Make sure we have 480 Mbps of bandwidth, otherwise things like
	 * video streaming likely won't work, since 12 Mbps is generally
	 * not enough even for most Digital TV streams.
	 */
	if (usbdev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
		pr_err("au0828: Device initialization failed.\n");
		pr_err("au0828: Device must be connected to a high-speed USB 2.0 port.\n");
		return -ENODEV;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		pr_err("%s() Unable to allocate memory\n", __func__);
		return -ENOMEM;
	}

	mutex_init(&dev->lock);
	mutex_lock(&dev->lock);
	mutex_init(&dev->mutex);
	mutex_init(&dev->dvb.lock);
	dev->usbdev = usbdev;
	dev->boardnr = id->driver_info;
	dev->board = au0828_boards[dev->boardnr];

	/* Initialize the media controller */
	retval = au0828_media_device_init(dev, usbdev);
	if (retval) {
		pr_err("%s() au0828_media_device_init failed\n",
		       __func__);
		mutex_unlock(&dev->lock);
		kfree(dev);
		return retval;
	}

	retval = au0828_v4l2_device_register(interface, dev);
	if (retval) {
		au0828_usb_v4l2_media_release(dev);
		mutex_unlock(&dev->lock);
		kfree(dev);
		return retval;
	}

	/* Power Up the bridge */
	au0828_write(dev, REG_600, 1 << 4);

	/* Bring up the GPIO's and supporting devices */
	au0828_gpio_setup(dev);

	/* I2C */
	au0828_i2c_register(dev);

	/* Setup */
	au0828_card_setup(dev);

	/*
	 * Store the pointer to the au0828_dev so it can be accessed in
	 * au0828_usb_disconnect
	 */
	usb_set_intfdata(interface, dev);

	/* Analog TV */
	retval = au0828_analog_register(dev, interface);
	if (retval) {
		pr_err("%s() au0828_analog_register failed to register on V4L2\n",
		       __func__);
		mutex_unlock(&dev->lock);
		goto done;
	}

	/* Digital TV */
	retval = au0828_dvb_register(dev);
	if (retval)
		pr_err("%s() au0828_dvb_register failed\n",
		       __func__);

	/* Remote controller */
	au0828_rc_register(dev);

	pr_info("Registered device AU0828 [%s]\n",
		dev->board.name == NULL ? "Unset" : dev->board.name);

	mutex_unlock(&dev->lock);

	retval = au0828_media_device_register(dev, usbdev);

done:
	if (retval < 0)
		au0828_usb_disconnect(interface);

	return retval;
}
static int au0828_suspend(struct usb_interface *interface,
			  pm_message_t message)
{
	struct au0828_dev *dev = usb_get_intfdata(interface);

	if (!dev)
		return 0;

	pr_info("Suspend\n");

	au0828_rc_suspend(dev);
	au0828_v4l2_suspend(dev);
	au0828_dvb_suspend(dev);

	/* FIXME: should suspend also ATV/DTV */

	return 0;
}

static int au0828_resume(struct usb_interface *interface)
{
	struct au0828_dev *dev = usb_get_intfdata(interface);

	if (!dev)
		return 0;

	pr_info("Resume\n");

	/* Power Up the bridge */
	au0828_write(dev, REG_600, 1 << 4);

	/* Bring up the GPIO's and supporting devices */
	au0828_gpio_setup(dev);

	au0828_rc_resume(dev);
	au0828_v4l2_resume(dev);
	au0828_dvb_resume(dev);

	/* FIXME: should resume also ATV/DTV */

	return 0;
}

static struct usb_driver au0828_usb_driver = {
	.name		= KBUILD_MODNAME,
	.probe		= au0828_usb_probe,
	.disconnect	= au0828_usb_disconnect,
	.id_table	= au0828_usb_id_table,
	.suspend	= au0828_suspend,
	.resume		= au0828_resume,
	.reset_resume	= au0828_resume,
};

static int __init au0828_init(void)
{
	int ret;

	if (au0828_debug & 1)
		pr_info("%s() Debugging is enabled\n", __func__);

	if (au0828_debug & 2)
		pr_info("%s() USB Debugging is enabled\n", __func__);

	if (au0828_debug & 4)
		pr_info("%s() I2C Debugging is enabled\n", __func__);

	if (au0828_debug & 8)
		pr_info("%s() Bridge Debugging is enabled\n", __func__);

	if (au0828_debug & 16)
		pr_info("%s() IR Debugging is enabled\n", __func__);

	pr_info("au0828 driver loaded\n");

	ret = usb_register(&au0828_usb_driver);
	if (ret)
		pr_err("usb_register failed, error = %d\n", ret);

	return ret;
}

static void __exit au0828_exit(void)
{
	usb_deregister(&au0828_usb_driver);
}

module_init(au0828_init);
module_exit(au0828_exit);
MODULE_DESCRIPTION("Driver for Auvitek AU0828 based products");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.3");
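/*
 * Illustrative note (not from the original file): the "debug" module
 * parameter is a bitmask, per the MODULE_PARM_DESC near the top, so
 * values combine:
 *
 *	modprobe au0828 debug=10	# USB (2) + bridge (8) messages
 *
 * dprintk(level, ...) calls in this file only print when
 * (au0828_debug & level) is set; the macro itself is expected to live
 * in au0828.h.
 */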
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Oracle Corporation
 */
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/sched/task.h>
#include <linux/sched/vhost_task.h>
#include <linux/sched/signal.h>

enum vhost_task_flags {
	VHOST_TASK_FLAGS_STOP,
	VHOST_TASK_FLAGS_KILLED,
};

struct vhost_task {
	bool (*fn)(void *data);
	void (*handle_sigkill)(void *data);
	void *data;
	struct completion exited;
	unsigned long flags;
	struct task_struct *task;
	/* serialize SIGKILL and vhost_task_stop calls */
	struct mutex exit_mutex;
};

static int vhost_task_fn(void *data)
{
	struct vhost_task *vtsk = data;

	for (;;) {
		bool did_work;

		if (signal_pending(current)) {
			struct ksignal ksig;

			if (get_signal(&ksig))
				break;
		}

		/* mb paired w/ vhost_task_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		did_work = vtsk->fn(vtsk->data);
		if (!did_work)
			schedule();
	}

	mutex_lock(&vtsk->exit_mutex);
	/*
	 * If a vhost_task_stop and SIGKILL race, we can ignore the SIGKILL.
	 * When the vhost layer has called vhost_task_stop it's already stopped
	 * new work and flushed.
	 */
	if (!test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
		set_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags);
		vtsk->handle_sigkill(vtsk->data);
	}
	mutex_unlock(&vtsk->exit_mutex);
	complete(&vtsk->exited);

	do_exit(0);
}

/**
 * vhost_task_wake - wakeup the vhost_task
 * @vtsk: vhost_task to wake
 *
 * wake up the vhost_task worker thread
 */
void vhost_task_wake(struct vhost_task *vtsk)
{
	wake_up_process(vtsk->task);
}
EXPORT_SYMBOL_GPL(vhost_task_wake);

/**
 * vhost_task_stop - stop a vhost_task
 * @vtsk: vhost_task to stop
 *
 * vhost_task_fn ensures the worker thread exits after
 * VHOST_TASK_FLAGS_STOP becomes true.
 */
void vhost_task_stop(struct vhost_task *vtsk)
{
	mutex_lock(&vtsk->exit_mutex);
	if (!test_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags)) {
		set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
		vhost_task_wake(vtsk);
	}
	mutex_unlock(&vtsk->exit_mutex);

	/*
	 * Make sure vhost_task_fn is no longer accessing the vhost_task before
	 * freeing it below.
	 */
	wait_for_completion(&vtsk->exited);

	put_task_struct(vtsk->task);
	kfree(vtsk);
}
EXPORT_SYMBOL_GPL(vhost_task_stop);
/**
 * vhost_task_create - create a copy of a task to be used by the kernel
 * @fn: vhost worker function
 * @handle_sigkill: vhost function to handle when we are killed
 * @arg: data to be passed to @fn and @handle_sigkill
 * @name: the thread's name
 *
 * This returns a specialized task for use by the vhost layer or ERR_PTR() on
 * failure.  The returned task is inactive, and the caller must fire it up
 * through vhost_task_start().
 */
struct vhost_task *vhost_task_create(bool (*fn)(void *),
				     void (*handle_sigkill)(void *),
				     void *arg, const char *name)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_FS | CLONE_UNTRACED | CLONE_VM |
				  CLONE_THREAD | CLONE_SIGHAND,
		.exit_signal	= 0,
		.fn		= vhost_task_fn,
		.name		= name,
		.user_worker	= 1,
		.no_files	= 1,
	};
	struct vhost_task *vtsk;
	struct task_struct *tsk;

	vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL);
	if (!vtsk)
		return ERR_PTR(-ENOMEM);

	init_completion(&vtsk->exited);
	mutex_init(&vtsk->exit_mutex);
	vtsk->data = arg;
	vtsk->fn = fn;
	vtsk->handle_sigkill = handle_sigkill;

	args.fn_arg = vtsk;

	tsk = copy_process(NULL, 0, NUMA_NO_NODE, &args);
	if (IS_ERR(tsk)) {
		kfree(vtsk);
		return ERR_CAST(tsk);
	}

	vtsk->task = get_task_struct(tsk);
	return vtsk;
}
EXPORT_SYMBOL_GPL(vhost_task_create);

/**
 * vhost_task_start - start a vhost_task created with vhost_task_create
 * @vtsk: vhost_task to wake up
 */
void vhost_task_start(struct vhost_task *vtsk)
{
	wake_up_new_task(vtsk->task);
}
EXPORT_SYMBOL_GPL(vhost_task_start);
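/*
 * Illustrative only: the lifecycle the kerneldoc above describes.
 * my_work_fn, my_kill_fn and ctx are hypothetical stand-ins for a
 * vhost driver's worker, kill handler and private data.
 */
#if 0	/* example */
static bool my_work_fn(void *data)
{
	/* Process one batch of queued work. Returning true makes
	 * vhost_task_fn() call us again; returning false lets the
	 * task sleep until vhost_task_wake(). */
	return false;
}

static void my_kill_fn(void *data)
{
	/* Runs at most once if the task gets SIGKILL before being
	 * stopped; clean up queued work here. */
}

	struct vhost_task *vtsk;

	vtsk = vhost_task_create(my_work_fn, my_kill_fn, ctx, "vhost-demo");
	if (IS_ERR(vtsk))
		return PTR_ERR(vtsk);
	vhost_task_start(vtsk);		/* created inactive; start it */
	/* ... vhost_task_wake(vtsk) each time work is queued ... */
	vhost_task_stop(vtsk);		/* waits for exit, then frees vtsk */
#endif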
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_cfm.h"
#include "br_private_tunnel.h"
#include "br_private_mcast_eht.h"

static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}
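/*
 * Illustrative only: what the counting above produces. Consecutive
 * vids with identical flags collapse into one range, emitted later by
 * br_fill_ifvlaninfo_range() as two bridge_vlan_info entries
 * (RANGE_BEGIN + RANGE_END); a lone vid, or a vid whose flags differ
 * (e.g. the pvid), costs one entry:
 *
 *	vids 10,11,12 (same flags) -> 2 infos: 10 RANGE_BEGIN, 12 RANGE_END
 *	vid  20       (pvid)       -> 1 info:  20
 *
 * so __get_num_vlan_infos() would return 3 here.
 */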
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br = NULL;
	u32 num_cfm_peer_mep_infos;
	u32 num_cfm_mep_infos;
	size_t vinfo_sz = 0;
	int num_vlan_infos;

	rcu_read_lock();
	if (netif_is_bridge_port(dev)) {
		p = br_port_get_check_rcu(dev);
		if (p)
			vg = nbp_vlan_group_rcu(p);
	} else if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	if (p && vg && (filter_mask & RTEXT_FILTER_MST))
		vinfo_sz += br_mst_info_size(vg);

	if (!(filter_mask & RTEXT_FILTER_CFM_STATUS))
		return vinfo_sz;

	if (!br)
		return vinfo_sz;

	/* CFM status info must be added */
	br_cfm_mep_count(br, &num_cfm_mep_infos);
	br_cfm_peer_mep_count(br, &num_cfm_peer_mep_infos);

	vinfo_sz += nla_total_size(0);	/* IFLA_BRIDGE_CFM */
	/* For each status struct the MEP instance (u32) is added */
	/* MEP instance (u32) + br_cfm_mep_status */
	vinfo_sz += num_cfm_mep_infos *
		     /*IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE */
		    (nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN */
		     + nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN */
		     + nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN */
		     + nla_total_size(sizeof(u32)));
	/* MEP instance (u32) + br_cfm_cc_peer_status */
	vinfo_sz += num_cfm_peer_mep_infos *
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE */
		    (nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID */
		     + nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT */
		     + nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI */
		     + nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE */
		     + nla_total_size(sizeof(u8))
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE */
		     + nla_total_size(sizeof(u8))
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN */
		     + nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN */
		     + nla_total_size(sizeof(u32))
		     /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN */
		     + nla_total_size(sizeof(u32)));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
		+ nla_total_size(1)	/* IFLA_BRPORT_LOCKED */
		+ nla_total_size(1)	/* IFLA_BRPORT_MAB */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_VLAN_SUPPRESS */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_N_GROUPS */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_MAX_GROUPS */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_RING_OPEN */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_IN_OPEN */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_EHT_HOSTS_CNT */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_BACKUP_NHID */
		+ 0;
}
static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
				 filter_mask)) /* IFLA_AF_SPEC */
		+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags &
							  BR_MRP_LOST_CONT)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
		       !!(p->flags & BR_MRP_LOST_IN_CONT)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB)) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
		       !!(p->flags & BR_NEIGH_VLAN_SUPPRESS)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_ctx.multicast_router) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
			p->multicast_eht_hosts_limit) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
			p->multicast_eht_hosts_cnt) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_N_GROUPS,
			br_multicast_ngroups_get(&p->multicast_ctx)) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_MAX_GROUPS,
			br_multicast_ngroups_get_max(&p->multicast_ctx)))
		return -EMSGSIZE;
#endif

	/* we might be called only with br->lock */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	if (p->backup_nhid &&
	    nla_put_u32(skb, IFLA_BRPORT_BACKUP_NHID, p->backup_nhid))
		return -EMSGSIZE;

	return 0;
}
static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL,
		       !!(p->flags & BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN,
		       !!(p->flags & BR_MRP_LOST_CONT)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
		       !!(p->flags & BR_MRP_LOST_IN_CONT)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB)) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
		       !!(p->flags & BR_NEIGH_VLAN_SUPPRESS)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_ctx.multicast_router) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
			p->multicast_eht_hosts_limit) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
			p->multicast_eht_hosts_cnt) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_N_GROUPS,
			br_multicast_ngroups_get(&p->multicast_ctx)) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_MAX_GROUPS,
			br_multicast_ngroups_get_max(&p->multicast_ctx)))
		return -EMSGSIZE;
#endif

	/* we might be called only with br->lock */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	if (p->backup_nhid &&
	    nla_put_u32(skb, IFLA_BRPORT_BACKUP_NHID, p->backup_nhid))
		return -EMSGSIZE;

	return 0;
}

static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
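/*
 * A minimal standalone sketch (not part of this file) of the run-length walk
 * that br_fill_ifvlaninfo_compressed() below (and __get_num_vlan_infos()
 * above) performs: consecutive vids with identical flags collapse into one
 * BEGIN/END range, a gap or a flag change flushes the current run. The input
 * data is made up; build separately.
 */
#if 0
#include <stdio.h>

struct vlan { unsigned short vid; unsigned short flags; };

static void emit(unsigned short start, unsigned short end, unsigned short fl)
{
	if (end > start)
		printf("range %u-%u flags 0x%x\n", start, end, fl);
	else
		printf("vlan  %u    flags 0x%x\n", start, fl);
}

int main(void)
{
	const struct vlan vlans[] = {
		{ 1, 0 }, { 2, 0 }, { 3, 0 },	/* contiguous, same flags */
		{ 5, 0 },			/* gap breaks the range */
		{ 6, 1 },			/* flag change breaks it too */
	};
	unsigned short start = 0, end = 0, fl = 0;

	for (unsigned int i = 0; i < sizeof(vlans) / sizeof(vlans[0]); i++) {
		const struct vlan *v = &vlans[i];

		if (start == 0) {
			start = end = v->vid;
			fl = v->flags;
		} else if (v->vid - end == 1 && v->flags == fl) {
			end = v->vid;
		} else {
			emit(start, end, fl);
			start = end = v->vid;
			fl = v->flags;
		}
	}
	if (start != 0)
		emit(start, end, fl);	/* flush the trailing run */
	return 0;	/* prints: range 1-3, vlan 5, vlan 6 */
}
#endif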
static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev,
			  bool getlink)
{
	u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) :
					    IF_OPER_DOWN;
	struct nlattr *af = NULL;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = netif_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest;

		nest = nla_nest_start(skb, IFLA_PROTINFO);
		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	if (filter_mask & (RTEXT_FILTER_BRVLAN |
			   RTEXT_FILTER_BRVLAN_COMPRESSED |
			   RTEXT_FILTER_MRP | RTEXT_FILTER_CFM_CONFIG |
			   RTEXT_FILTER_CFM_STATUS | RTEXT_FILTER_MST)) {
		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
		if (!af)
			goto nla_put_failure;
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
	}

	if (filter_mask & RTEXT_FILTER_MRP) {
		int err;

		if (!br_mrp_enabled(br) || port)
			goto done;

		rcu_read_lock();
		err = br_mrp_fill_info(skb, br);
		rcu_read_unlock();

		if (err)
			goto nla_put_failure;
	}

	if (filter_mask & (RTEXT_FILTER_CFM_CONFIG | RTEXT_FILTER_CFM_STATUS)) {
		struct nlattr *cfm_nest = NULL;
		int err;

		if (!br_cfm_created(br) || port)
			goto done;

		cfm_nest = nla_nest_start(skb, IFLA_BRIDGE_CFM);
		if (!cfm_nest)
			goto nla_put_failure;

		if (filter_mask & RTEXT_FILTER_CFM_CONFIG) {
			rcu_read_lock();
			err = br_cfm_config_fill_info(skb, br);
			rcu_read_unlock();
			if (err)
				goto nla_put_failure;
		}

		if (filter_mask & RTEXT_FILTER_CFM_STATUS) {
			rcu_read_lock();
			err = br_cfm_status_fill_info(skb, br, getlink);
			rcu_read_unlock();
			if (err)
				goto nla_put_failure;
		}

		nla_nest_end(skb, cfm_nest);
	}

	if ((filter_mask & RTEXT_FILTER_MST) &&
	    br_opt_get(br, BROPT_MST_ENABLED) && port) {
		const struct net_bridge_vlan_group *vg = nbp_vlan_group(port);
		struct nlattr *mst_nest;
		int err;

		if (!vg || !vg->num_vlans)
			goto done;

		mst_nest = nla_nest_start(skb, IFLA_BRIDGE_MST);
		if (!mst_nest)
			goto nla_put_failure;

		err = br_mst_fill_info(skb, vg);
		if (err)
			goto nla_put_failure;

		nla_nest_end(skb, mst_nest);
	}

done:
	if (af) {
		if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
			nla_nest_end(skb, af);
		else
			nla_nest_cancel(skb, af);
	}
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
void br_info_notify(int event, const struct net_bridge *br,
		    const struct net_bridge_port *port, u32 filter)
{
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev, false);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	br_info_notify(event, br, port, filter);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) &&
	    !(filter_mask & RTEXT_FILTER_MRP) &&
	    !(filter_mask & RTEXT_FILTER_CFM_CONFIG) &&
	    !(filter_mask & RTEXT_FILTER_CFM_STATUS))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev, true);
}

static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed,
			struct netlink_ext_ack *extack)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change, extack);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change, extack);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}
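/*
 * A minimal standalone sketch (not part of this file) of the userspace
 * request that reaches br_getlink() above: an RTM_GETLINK dump with
 * ifi_family set to AF_BRIDGE and an IFLA_EXT_MASK attribute carrying the
 * filter_mask. Reply parsing and most error handling are omitted; the
 * fixed struct layout assumes the natural 4-byte alignment of these
 * headers. Build separately.
 */
#if 0
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
		struct rtattr ext_req;
		__u32 ext_filter_mask;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifm.ifi_family = AF_BRIDGE;	/* routes the dump to br_getlink() */
	req.ext_req.rta_type = IFLA_EXT_MASK;
	req.ext_req.rta_len = RTA_LENGTH(sizeof(__u32));
	req.ext_filter_mask = RTEXT_FILTER_BRVLAN_COMPRESSED;
	send(fd, &req, sizeof(req), 0);
	/* recv() and walk the multipart reply here */
	close(fd);
	return 0;
}
#endif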
int br_process_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			 int cmd, struct bridge_vlan_info *vinfo_curr,
			 struct bridge_vlan_info **vinfo_last, bool *changed,
			 struct netlink_ext_ack *extack)
{
	int err, rtm_cmd;

	if (!br_vlan_valid_id(vinfo_curr->vid, extack))
		return -EINVAL;

	/* needed for vlan-only NEWVLAN/DELVLAN notifications */
	rtm_cmd = br_afspec_cmd_to_rtm(cmd);

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, v_change_start = 0;

		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			bool curr_change = false;

			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, &curr_change,
					   extack);
			if (err)
				break;
			if (curr_change) {
				*changed = curr_change;
				if (!v_change_start)
					v_change_start = v;
			} else {
				/* nothing to notify yet */
				if (!v_change_start)
					continue;
				br_vlan_notify(br, p, v_change_start,
					       v - 1, rtm_cmd);
				v_change_start = 0;
			}
			cond_resched();
		}
		/* v_change_start is set only if the last/whole range changed */
		if (v_change_start)
			br_vlan_notify(br, p, v_change_start,
				       v - 1, rtm_cmd);

		*vinfo_last = NULL;

		return err;
	}

	err = br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
	if (*changed)
		br_vlan_notify(br, p, vinfo_curr->vid, 0, rtm_cmd);

	return err;
}

static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed,
		     struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed,
						   extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_MRP:
			err = br_mrp_parse(br, p, attr, cmd, extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_CFM:
			err = br_cfm_parse(br, p, attr, cmd, extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_MST:
			if (!p) {
				NL_SET_ERR_MSG(extack,
					       "MST states can only be set on bridge ports");
				return -EINVAL;
			}
			if (cmd != RTM_SETLINK) {
				NL_SET_ERR_MSG(extack,
					       "MST states can only be set through RTM_SETLINK");
				return -EINVAL;
			}

			err = br_mst_process(p, attr, extack);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_UNSPEC]	= { .strict_start_type =
					IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + 1 },
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
	[IFLA_BRPORT_ISOLATED]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LOCKED] = { .type = NLA_U8 },
	[IFLA_BRPORT_MAB] = { .type = NLA_U8 },
	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
	[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 },
	[IFLA_BRPORT_MCAST_N_GROUPS] = { .type = NLA_REJECT },
	[IFLA_BRPORT_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
	[IFLA_BRPORT_NEIGH_VLAN_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1),
	[IFLA_BRPORT_BACKUP_NHID] = { .type = NLA_U32 },
};
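/*
 * A minimal standalone sketch (not part of this file) of the notification
 * batching in br_process_vlan_info() above: while walking a vid range,
 * consecutive vids that actually changed are coalesced into a single
 * notification, and an unchanged vid flushes the pending run. The apply()
 * outcome is made up; build separately.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool apply(int vid)
{
	return vid != 103;	/* pretend vid 103 was already configured */
}

int main(void)
{
	int v, start = 0;

	for (v = 100; v <= 105; v++) {
		if (apply(v)) {
			if (!start)
				start = v;	/* open a changed run */
		} else {
			if (start)
				printf("notify %d-%d\n", start, v - 1);
			start = 0;	/* unchanged vid flushes the run */
		}
	}
	if (start)
		printf("notify %d-%d\n", start, v - 1);
	return 0;	/* prints: notify 100-102, notify 104-105 */
}
#endif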
/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set or clear port flags based on attribute */
static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			     int attrtype, unsigned long mask)
{
	if (!tb[attrtype])
		return;

	if (nla_get_u8(tb[attrtype]))
		p->flags |= mask;
	else
		p->flags &= ~mask;
}
/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
		      struct netlink_ext_ack *extack)
{
	unsigned long old_flags, changed_mask;
	bool br_vlan_tunnel_old;
	int err;

	old_flags = p->flags;
	br_vlan_tunnel_old = (old_flags & BR_VLAN_TUNNEL) ? true : false;

	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE,
			 BR_MULTICAST_FAST_LEAVE);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST,
			 BR_MULTICAST_TO_UNICAST);
	br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS);
	br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
	br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED);
	br_set_port_flag(p, tb, IFLA_BRPORT_MAB, BR_PORT_MAB);
	br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
			 BR_NEIGH_VLAN_SUPPRESS);

	if ((p->flags & BR_PORT_MAB) &&
	    (!(p->flags & BR_PORT_LOCKED) || !(p->flags & BR_LEARNING))) {
		NL_SET_ERR_MSG(extack,
			       "Bridge port must be locked and have learning enabled when MAB is enabled");
		p->flags = old_flags;
		return -EINVAL;
	} else if (!(p->flags & BR_PORT_MAB) && (old_flags & BR_PORT_MAB)) {
		struct net_bridge_fdb_flush_desc desc = {
			.flags = BIT(BR_FDB_LOCKED),
			.flags_mask = BIT(BR_FDB_LOCKED),
			.port_ifindex = p->dev->ifindex,
		};

		br_fdb_flush(p->br, &desc);
	}

	changed_mask = old_flags ^ p->flags;

	err = br_switchdev_set_port_flag(p, p->flags, changed_mask, extack);
	if (err) {
		p->flags = old_flags;
		return err;
	}

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	br_port_flags_change(p, changed_mask);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(&p->multicast_ctx,
						   mcast_router);
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]) {
		u32 hlimit;

		hlimit = nla_get_u32(tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]);
		err = br_multicast_eht_set_hosts_limit(p, hlimit);
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_MCAST_MAX_GROUPS]) {
		u32 max_groups;

		max_groups = nla_get_u32(tb[IFLA_BRPORT_MCAST_MAX_GROUPS]);
		br_multicast_ngroups_set_max(&p->multicast_ctx, max_groups);
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
		struct net_device *backup_dev = NULL;
		u32 backup_ifindex;

		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
		if (backup_ifindex) {
			backup_dev = __dev_get_by_index(dev_net(p->dev),
							backup_ifindex);
			if (!backup_dev)
				return -ENOENT;
		}

		err = nbp_backup_change(p, backup_dev);
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_BACKUP_NHID]) {
		u32 backup_nhid = nla_get_u32(tb[IFLA_BRPORT_BACKUP_NHID]);

		WRITE_ONCE(p->backup_nhid, backup_nhid);
	}

	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
							  protinfo,
							  br_port_policy,
							  NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb, extack);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !netif_is_bridge_master(dev))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL] &&
	    !eth_type_vlan(nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])))
		return -EPROTONOSUPPORT;

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}
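/*
 * A minimal standalone sketch (not part of this file) of the two checks
 * br_validate() applies to IFLA_ADDRESS: a usable ethernet address is
 * neither multicast (bit 0 of the first octet set) nor all zeroes. This
 * mirrors the kernel's is_valid_ether_addr() in userspace; build separately.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool valid_ether_addr(const unsigned char a[6])
{
	bool zero = !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);

	return !(a[0] & 1) && !zero;	/* not multicast, not all-zero */
}

int main(void)
{
	const unsigned char ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("%d %d\n", valid_ether_addr(ucast), valid_ether_addr(mcast));
	return 0;	/* prints: 1 0 */
}
#endif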
static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data, extack);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_UNSPEC]	= { .strict_start_type =
				    IFLA_BR_FDB_N_LEARNED },
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
	[IFLA_BR_MULTI_BOOLOPT] =
		NLA_POLICY_EXACT_LEN(sizeof(struct br_boolopt_multi)),
	[IFLA_BR_FDB_N_LEARNED] = { .type = NLA_REJECT },
	[IFLA_BR_FDB_MAX_LEARNED] = { .type = NLA_U32 },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		err = br_stp_set_enabled(br, stp_enabled, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = br_vlan_filter_toggle(br, vlan_filter, extack);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
		__u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);

		err = br_vlan_set_stats_per_port(br, per_port);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||	/* 802.3x Pause address */
		    new_addr[5] == 2 ||	/* 802.3ad Slow protocols */
		    new_addr[5] == 3)	/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH]) {
		struct net_bridge_fdb_flush_desc desc = {
			.flags_mask = BIT(BR_FDB_STATIC)
		};

		br_fdb_flush(br, &desc);
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(&br->multicast_ctx,
					      multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(&br->multicast_ctx,
					       mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
		br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
			RHT_ELASTICITY);

	if (data[IFLA_BR_MCAST_HASH_MAX])
		br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_ctx.multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_ctx.multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_ctx.multicast_last_member_interval =
			clock_t_to_jiffies(val);
	}
	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_ctx.multicast_membership_interval =
			clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_ctx.multicast_querier_interval =
			clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br_multicast_set_query_intvl(&br->multicast_ctx, val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_ctx.multicast_query_response_interval =
			clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(&br->multicast_ctx,
						    igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(&br->multicast_ctx,
						   mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
	}
#endif

	if (data[IFLA_BR_MULTI_BOOLOPT]) {
		struct br_boolopt_multi *bm;

		bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
		err = br_boolopt_multi_toggle(br, bm, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_FDB_MAX_LEARNED]) {
		u32 val = nla_get_u32(data[IFLA_BR_FDB_MAX_LEARNED]);

		WRITE_ONCE(br->fdb_max_learned, val);
	}

	return 0;
}

static int br_dev_newlink(struct net_device *dev,
			  struct rtnl_newlink_params *params,
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	int err;

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = br_changelink(dev, tb, data, extack);
	if (err)
		br_dev_delete(dev, NULL);

	return err;
}
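/*
 * A minimal standalone sketch (not part of this file) of the unit
 * conversion behind the *_INTVL handling above: the netlink attributes
 * carry USER_HZ (1/100 s) ticks, while the bridge stores jiffies. This is
 * a simplified clock_t_to_jiffies() for the common case where HZ is a
 * multiple of USER_HZ; HZ=1000 is an assumption. Build separately.
 */
#if 0
#include <stdio.h>

#define USER_HZ	100
#define HZ	1000	/* assumption: CONFIG_HZ=1000 */

static unsigned long clock_t_to_jiffies_demo(unsigned long x)
{
	return x * (HZ / USER_HZ);
}

int main(void)
{
	/* 1000 user ticks = 10 s = 10000 jiffies at HZ=1000 */
	printf("%lu\n", clock_t_to_jiffies_demo(1000));
	return 0;
}
#endif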
static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY  */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_PER_PORT */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_FDB_N_LEARNED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_FDB_MAX_LEARNED */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
	       br_multicast_querier_state_size() + /* IFLA_BR_MCAST_QUERIER_STATE */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
	       0;
}
static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	struct br_boolopt_multi bm;
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	br_boolopt_multi_get(br, &bm);
	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
	    nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm) ||
	    nla_put_u32(skb, IFLA_BR_FDB_N_LEARNED,
			atomic_read(&br->fdb_n_learned)) ||
	    nla_put_u32(skb, IFLA_BR_FDB_MAX_LEARNED, br->fdb_max_learned))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER,
		       br->multicast_ctx.multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
		       br->multicast_ctx.multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_ctx.multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_ctx.multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_ctx.multicast_igmp_version) ||
	    br_multicast_dump_querier_state(skb, &br->multicast_ctx,
					    IFLA_BR_MCAST_QUERIER_STATE))
		return -EMSGSIZE;

#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_ctx.multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
	       (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct pcpu_sw_netstats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
			vxi.rx_packets = u64_stats_read(&stats.rx_packets);
			vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
			vxi.tx_packets = u64_stats_read(&stats.tx_packets);

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif

	if (p) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_STP,
					sizeof(p->stp_xstats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;

		spin_lock_bh(&br->lock);
		memcpy(nla_data(nla), &p->stp_xstats, sizeof(p->stp_xstats));
		spin_unlock_bh(&br->lock);
	}

	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}
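/*
 * A minimal standalone sketch (not part of this file) of the resume pattern
 * br_fill_linkxstats() uses above: a dump that runs out of room records how
 * far it got in *prividx, the next pass skips everything below that index,
 * and the index is zeroed once the dump finally completes. The item count
 * and budget are made up; build separately.
 */
#if 0
#include <stdio.h>

static int fill(int *prividx, int budget)
{
	int idx = 0;

	for (int item = 0; item < 10; item++) {
		if (++idx < *prividx)
			continue;	/* already sent in an earlier pass */
		if (budget-- <= 0) {
			*prividx = idx;	/* resume here next time */
			return -1;
		}
		printf("emit item %d\n", item);
	}
	*prividx = 0;	/* done: restart from scratch on the next dump */
	return 0;
}

int main(void)
{
	int prividx = 0;

	while (fill(&prividx, 4))
		;	/* retry until the dump completes */
	return 0;	/* emits items 0..9 exactly once */
}
#endif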
static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	err = br_vlan_rtnl_init();
	if (err)
		goto out;
	err = rtnl_af_register(&br_af_ops);
	if (err)
		goto out_vlan;
	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
out_vlan:
	br_vlan_rtnl_uninit();
out:
	return err;
}

void br_netlink_fini(void)
{
	br_vlan_rtnl_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * V4L2 controls framework uAPI implementation:
 *
 * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
 */

#define pr_fmt(fmt) "v4l2-ctrls: " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>

#include "v4l2-ctrls-priv.h"

/* Internal temporary helper struct, one for each v4l2_ext_control */
struct v4l2_ctrl_helper {
	/* Pointer to the control reference of the master control */
	struct v4l2_ctrl_ref *mref;
	/* The control ref corresponding to the v4l2_ext_control ID field. */
	struct v4l2_ctrl_ref *ref;
	/*
	 * v4l2_ext_control index of the next control belonging to the
	 * same cluster, or 0 if there isn't any.
	 */
	u32 next;
};

/*
 * Helper functions to copy control payload data from kernel space to
 * user space and vice versa.
 */
/* Helper function: copy the given control value back to the caller */
static int ptr_to_user(struct v4l2_ext_control *c,
		       struct v4l2_ctrl *ctrl,
		       union v4l2_ctrl_ptr ptr)
{
	u32 len;

	if (ctrl->is_ptr && !ctrl->is_string)
		return copy_to_user(c->ptr, ptr.p_const, c->size) ?
		       -EFAULT : 0;

	switch (ctrl->type) {
	case V4L2_CTRL_TYPE_STRING:
		len = strlen(ptr.p_char);
		if (c->size < len + 1) {
			c->size = ctrl->elem_size;
			return -ENOSPC;
		}
		return copy_to_user(c->string, ptr.p_char, len + 1) ?
		       -EFAULT : 0;
	case V4L2_CTRL_TYPE_INTEGER64:
		c->value64 = *ptr.p_s64;
		break;
	default:
		c->value = *ptr.p_s32;
		break;
	}
	return 0;
}

/* Helper function: copy the current control value back to the caller */
static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
	return ptr_to_user(c, ctrl, ctrl->p_cur);
}

/* Helper function: copy the new control value back to the caller */
static int new_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
	return ptr_to_user(c, ctrl, ctrl->p_new);
}

/* Helper function: copy the request value back to the caller */
static int req_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl_ref *ref)
{
	return ptr_to_user(c, ref->ctrl, ref->p_req);
}

/* Helper function: copy the initial control value back to the caller */
static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
	ctrl->type_ops->init(ctrl, 0, ctrl->p_new);

	return ptr_to_user(c, ctrl, ctrl->p_new);
}

/* Helper function: copy the minimum control value back to the caller */
static int min_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
	ctrl->type_ops->minimum(ctrl, 0, ctrl->p_new);

	return ptr_to_user(c, ctrl, ctrl->p_new);
}

/* Helper function: copy the maximum control value back to the caller */
static int max_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
	ctrl->type_ops->maximum(ctrl, 0, ctrl->p_new);

	return ptr_to_user(c, ctrl, ctrl->p_new);
}

/* Helper function: copy the caller-provided value as the new control value */
static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
	int ret;
	u32 size;

	ctrl->is_new = 0;
	if (ctrl->is_dyn_array &&
	    c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) {
		void *old = ctrl->p_array;
		void *tmp = kvzalloc(2 * c->size, GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;
		memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
		memcpy(tmp + c->size, ctrl->p_cur.p,
		       ctrl->elems * ctrl->elem_size);
		ctrl->p_new.p = tmp;
		ctrl->p_cur.p = tmp + c->size;
		ctrl->p_array = tmp;
		ctrl->p_array_alloc_elems = c->size / ctrl->elem_size;
		kvfree(old);
	}

	if (ctrl->is_ptr && !ctrl->is_string) {
		unsigned int elems = c->size / ctrl->elem_size;

		if (copy_from_user(ctrl->p_new.p, c->ptr, c->size))
			return -EFAULT;
		ctrl->is_new = 1;
		if (ctrl->is_dyn_array)
			ctrl->new_elems = elems;
		else if (ctrl->is_array)
			ctrl->type_ops->init(ctrl, elems, ctrl->p_new);
		return 0;
	}

	switch (ctrl->type) {
	case V4L2_CTRL_TYPE_INTEGER64:
		*ctrl->p_new.p_s64 = c->value64;
		break;
	case V4L2_CTRL_TYPE_STRING:
		size = c->size;
		if (size == 0)
			return -ERANGE;
		if (size > ctrl->maximum + 1)
			size = ctrl->maximum + 1;
		ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ?
		      -EFAULT : 0;
		if (!ret) {
			char last = ctrl->p_new.p_char[size - 1];

			ctrl->p_new.p_char[size - 1] = 0;
			/*
			 * If the string was longer than ctrl->maximum,
			 * then return an error.
			 */
			if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
				return -ERANGE;
			ctrl->is_new = 1;
		}
		return ret;
	default:
		*ctrl->p_new.p_s32 = c->value;
		break;
	}

	ctrl->is_new = 1;
	return 0;
}
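/*
 * A minimal standalone sketch (not part of this file) of the grow path in
 * user_to_new() above: a single allocation of 2 * size holds the "new"
 * value in its first half and the "cur" value in its second half, so both
 * pointers stay valid after one realloc-style move. calloc/free stand in
 * for kvzalloc/kvfree; build separately.
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct dyn_ctrl {
	void *p_new, *p_cur, *p_array;
	size_t elems, elem_size;
};

static int grow(struct dyn_ctrl *c, size_t new_size)
{
	void *old = c->p_array;
	void *tmp = calloc(2, new_size);

	if (!tmp)
		return -1;
	/* carry both values over into the two halves of the new buffer */
	memcpy(tmp, c->p_new, c->elems * c->elem_size);
	memcpy((char *)tmp + new_size, c->p_cur, c->elems * c->elem_size);
	c->p_new = tmp;
	c->p_cur = (char *)tmp + new_size;
	c->p_array = tmp;
	free(old);
	return 0;
}
#endif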
/*
 * VIDIOC_G/TRY/S_EXT_CTRLS implementation
 */

/*
 * Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
 *
 * It is not a fully atomic operation, just best-effort only. After all, if
 * multiple controls have to be set through multiple i2c writes (for example)
 * then some initial writes may succeed while others fail. Thus leaving the
 * system in an inconsistent state. The question is how much effort you are
 * willing to spend on trying to make something atomic that really isn't.
 *
 * From the point of view of an application the main requirement is that
 * when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
 * error should be returned without actually affecting any controls.
 *
 * If all the values are correct, then it is acceptable to just give up
 * in case of low-level errors.
 *
 * It is important though that the application can tell when only a partial
 * configuration was done. The way we do that is through the error_idx field
 * of struct v4l2_ext_controls: if that is equal to the count field then no
 * controls were affected. Otherwise all controls before that index were
 * successful in performing their 'get' or 'set' operation, the control at
 * the given index failed, and you don't know what happened with the controls
 * after the failed one. Since if they were part of a control cluster they
 * could have been successfully processed (if a cluster member was encountered
 * at index < error_idx), they could have failed (if a cluster member was at
 * error_idx), or they may not have been processed yet (if the first cluster
 * member appeared after error_idx).
 *
 * It is all fairly theoretical, though. In practice all you can do is to
 * bail out. If error_idx == count, then it is an application bug. If
 * error_idx < count then it is only an application bug if the error code was
 * EBUSY. That usually means that something started streaming just when you
 * tried to set the controls. In all other cases it is a driver/hardware
 * problem and all you can do is to retry or bail out.
 *
 * Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
 * never modifies controls the error_idx is just set to whatever control
 * has an invalid value.
 */
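/*
 * A minimal standalone userspace sketch (not part of this file) of the
 * error_idx rules described above: after a failed VIDIOC_S_EXT_CTRLS,
 * error_idx == count means nothing was applied, anything smaller means a
 * partial configuration. The fd and control array come from the caller;
 * build separately.
 */
#if 0
#include <linux/videodev2.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_ctrls(int fd, struct v4l2_ext_control *c, unsigned int count)
{
	struct v4l2_ext_controls cs;

	memset(&cs, 0, sizeof(cs));
	cs.which = V4L2_CTRL_WHICH_CUR_VAL;
	cs.count = count;
	cs.controls = c;

	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &cs) == 0)
		return 0;

	if (cs.error_idx == cs.count)
		fprintf(stderr, "validation failed, no control was touched\n");
	else
		fprintf(stderr, "control %u failed; state of later controls is unknown\n",
			cs.error_idx);
	return -1;
}
#endif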
*/ if (id >= V4L2_CID_PRIVATE_BASE) { dprintk(vdev, "old-style private controls not allowed\n"); return -EINVAL; } ref = find_ref_lock(hdl, id); if (!ref) { dprintk(vdev, "cannot find control id 0x%x\n", id); return -EINVAL; } h->ref = ref; ctrl = ref->ctrl; if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) { dprintk(vdev, "control id 0x%x is disabled\n", id); return -EINVAL; } if (!(ctrl->flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) && (cs->which == V4L2_CTRL_WHICH_MIN_VAL || cs->which == V4L2_CTRL_WHICH_MAX_VAL)) { dprintk(vdev, "invalid which 0x%x or control id 0x%x\n", cs->which, id); return -EINVAL; } if (ctrl->cluster[0]->ncontrols > 1) have_clusters = true; if (ctrl->cluster[0] != ctrl) ref = find_ref_lock(hdl, ctrl->cluster[0]->id); if (ctrl->is_dyn_array) { unsigned int max_size = ctrl->dims[0] * ctrl->elem_size; unsigned int tot_size = ctrl->elem_size; if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) tot_size *= ref->p_req_elems; else tot_size *= ctrl->elems; c->size = ctrl->elem_size * (c->size / ctrl->elem_size); if (get) { if (c->size < tot_size) { c->size = tot_size; return -ENOSPC; } c->size = tot_size; } else { if (c->size > max_size) { c->size = max_size; return -ENOSPC; } if (!c->size) return -EFAULT; } } else if (ctrl->is_ptr && !ctrl->is_string) { unsigned int tot_size = ctrl->elems * ctrl->elem_size; if (c->size < tot_size) { /* * In the get case the application first * queries to obtain the size of the control. */ if (get) { c->size = tot_size; return -ENOSPC; } dprintk(vdev, "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n", id, c->size, tot_size); return -EFAULT; } c->size = tot_size; } /* Store the ref to the master control of the cluster */ h->mref = ref; /* * Initially set next to 0, meaning that there is no other * control in this helper array belonging to the same * cluster. */ h->next = 0; } /* * We are done if there were no controls that belong to a multi- * control cluster. */ if (!have_clusters) return 0; /* * The code below figures out in O(n) time which controls in the list * belong to the same cluster. */ /* This has to be done with the handler lock taken. */ mutex_lock(hdl->lock); /* First zero the helper field in the master control references */ for (i = 0; i < cs->count; i++) helpers[i].mref->helper = NULL; for (i = 0, h = helpers; i < cs->count; i++, h++) { struct v4l2_ctrl_ref *mref = h->mref; /* * If the mref->helper is set, then it points to an earlier * helper that belongs to the same cluster. */ if (mref->helper) { /* * Set the next field of mref->helper to the current * index: this means that the earlier helper now * points to the next helper in the same cluster. */ mref->helper->next = i; /* * mref should be set only for the first helper in the * cluster, clear the others. */ h->mref = NULL; } /* Point the mref helper to the current helper struct. */ mref->helper = h; } mutex_unlock(hdl->lock); return 0; } /* * Handles the corner case where cs->count == 0. It checks whether the * specified control class exists. If that class ID is 0, then it checks * whether there are any controls at all. */ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) { if (which == 0 || (which >= V4L2_CTRL_WHICH_DEF_VAL && which <= V4L2_CTRL_WHICH_MAX_VAL)) return 0; return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; } /* * Get extended controls. Allocates the helpers array if needed. 
* * Note that v4l2_g_ext_ctrls_common() with 'which' set to * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was * completed, and in that case p_req_valid is true for all controls. */ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs, struct video_device *vdev) { struct v4l2_ctrl_helper helper[4]; struct v4l2_ctrl_helper *helpers = helper; int ret; int i, j; bool is_default, is_request, is_min, is_max; is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL); is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL); is_min = (cs->which == V4L2_CTRL_WHICH_MIN_VAL); is_max = (cs->which == V4L2_CTRL_WHICH_MAX_VAL); cs->error_idx = cs->count; cs->which = V4L2_CTRL_ID2WHICH(cs->which); if (!hdl) return -EINVAL; if (cs->count == 0) return class_check(hdl, cs->which); if (cs->count > ARRAY_SIZE(helper)) { helpers = kvmalloc_array(cs->count, sizeof(helper[0]), GFP_KERNEL); if (!helpers) return -ENOMEM; } ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true); cs->error_idx = cs->count; for (i = 0; !ret && i < cs->count; i++) if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) ret = -EACCES; for (i = 0; !ret && i < cs->count; i++) { struct v4l2_ctrl *master; bool is_volatile = false; u32 idx = i; if (!helpers[i].mref) continue; master = helpers[i].mref->ctrl; cs->error_idx = i; v4l2_ctrl_lock(master); /* * g_volatile_ctrl will update the new control values. * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL, * V4L2_CTRL_WHICH_MIN_VAL, V4L2_CTRL_WHICH_MAX_VAL and * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests * it is v4l2_ctrl_request_complete() that copies the * volatile controls at the time of request completion * to the request, so you don't want to do that again. */ if (!is_default && !is_request && !is_min && !is_max && ((master->flags & V4L2_CTRL_FLAG_VOLATILE) || (master->has_volatiles && !is_cur_manual(master)))) { for (j = 0; j < master->ncontrols; j++) cur_to_new(master->cluster[j]); ret = call_op(master, g_volatile_ctrl); is_volatile = true; } if (ret) { v4l2_ctrl_unlock(master); break; } /* * Copy the default value (if is_default is true), the * request value (if is_request is true and p_req is valid), * the new volatile value (if is_volatile is true) or the * current value. */ do { struct v4l2_ctrl_ref *ref = helpers[idx].ref; if (is_default) ret = def_to_user(cs->controls + idx, ref->ctrl); else if (is_request && ref->p_req_array_enomem) ret = -ENOMEM; else if (is_request && ref->p_req_valid) ret = req_to_user(cs->controls + idx, ref); else if (is_min) ret = min_to_user(cs->controls + idx, ref->ctrl); else if (is_max) ret = max_to_user(cs->controls + idx, ref->ctrl); else if (is_volatile) ret = new_to_user(cs->controls + idx, ref->ctrl); else ret = cur_to_user(cs->controls + idx, ref->ctrl); idx = helpers[idx].next; } while (!ret && idx); v4l2_ctrl_unlock(master); } if (cs->count > ARRAY_SIZE(helper)) kvfree(helpers); return ret; } int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs) { if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs); return v4l2_g_ext_ctrls_common(hdl, cs, vdev); } EXPORT_SYMBOL(v4l2_g_ext_ctrls); /* Validate a new control */ static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new) { return ctrl->type_ops->validate(ctrl, p_new); } /* Validate controls. 
*/ static int validate_ctrls(struct v4l2_ext_controls *cs, struct v4l2_ctrl_helper *helpers, struct video_device *vdev, bool set) { unsigned int i; int ret = 0; cs->error_idx = cs->count; for (i = 0; i < cs->count; i++) { struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl; union v4l2_ctrl_ptr p_new; cs->error_idx = i; if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) { dprintk(vdev, "control id 0x%x is read-only\n", ctrl->id); return -EACCES; } /* * This test is also done in try_set_control_cluster() which * is called in atomic context, so that has the final say, * but it makes sense to do an up-front check as well. Once * an error occurs in try_set_control_cluster() some other * controls may have been set already and we want to do a * best-effort to avoid that. */ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) { dprintk(vdev, "control id 0x%x is grabbed, cannot set\n", ctrl->id); return -EBUSY; } /* * Skip validation for now if the payload needs to be copied * from userspace into kernelspace. We'll validate those later. */ if (ctrl->is_ptr) continue; if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) p_new.p_s64 = &cs->controls[i].value64; else p_new.p_s32 = &cs->controls[i].value; ret = validate_new(ctrl, p_new); if (ret) return ret; } return 0; } /* Try or try-and-set controls */ int try_set_ext_ctrls_common(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs, struct video_device *vdev, bool set) { struct v4l2_ctrl_helper helper[4]; struct v4l2_ctrl_helper *helpers = helper; unsigned int i, j; int ret; cs->error_idx = cs->count; /* Default/minimum/maximum values cannot be changed */ if (cs->which == V4L2_CTRL_WHICH_DEF_VAL || cs->which == V4L2_CTRL_WHICH_MIN_VAL || cs->which == V4L2_CTRL_WHICH_MAX_VAL) { dprintk(vdev, "%s: cannot change default/min/max value\n", video_device_node_name(vdev)); return -EINVAL; } cs->which = V4L2_CTRL_ID2WHICH(cs->which); if (!hdl) { dprintk(vdev, "%s: invalid null control handler\n", video_device_node_name(vdev)); return -EINVAL; } if (cs->count == 0) return class_check(hdl, cs->which); if (cs->count > ARRAY_SIZE(helper)) { helpers = kvmalloc_array(cs->count, sizeof(helper[0]), GFP_KERNEL); if (!helpers) return -ENOMEM; } ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false); if (!ret) ret = validate_ctrls(cs, helpers, vdev, set); if (ret && set) cs->error_idx = cs->count; for (i = 0; !ret && i < cs->count; i++) { struct v4l2_ctrl *master; u32 idx = i; if (!helpers[i].mref) continue; cs->error_idx = i; master = helpers[i].mref->ctrl; v4l2_ctrl_lock(master); /* Reset the 'is_new' flags of the cluster */ for (j = 0; j < master->ncontrols; j++) if (master->cluster[j]) master->cluster[j]->is_new = 0; /* * For volatile autoclusters that are currently in auto mode * we need to discover if it will be set to manual mode. * If so, then we have to copy the current volatile values * first since those will become the new manual values (which * may be overwritten by explicit new values from this set * of controls). */ if (master->is_auto && master->has_volatiles && !is_cur_manual(master)) { /* Pick an initial non-manual value */ s32 new_auto_val = master->manual_mode_value + 1; u32 tmp_idx = idx; do { /* * Check if the auto control is part of the * list, and remember the new value. */ if (helpers[tmp_idx].ref->ctrl == master) new_auto_val = cs->controls[tmp_idx].value; tmp_idx = helpers[tmp_idx].next; } while (tmp_idx); /* * If the new value == the manual value, then copy * the current volatile values. 
*/ if (new_auto_val == master->manual_mode_value) update_from_auto_cluster(master); } /* * Copy the new caller-supplied control values. * user_to_new() sets 'is_new' to 1. */ do { struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl; ret = user_to_new(cs->controls + idx, ctrl); if (!ret && ctrl->is_ptr) { ret = validate_new(ctrl, ctrl->p_new); if (ret) dprintk(vdev, "failed to validate control %s (%d)\n", v4l2_ctrl_get_name(ctrl->id), ret); } idx = helpers[idx].next; } while (!ret && idx); if (!ret) ret = try_or_set_cluster(fh, master, !hdl->req_obj.req && set, 0); if (!ret && hdl->req_obj.req && set) { for (j = 0; j < master->ncontrols; j++) { struct v4l2_ctrl_ref *ref = find_ref(hdl, master->cluster[j]->id); new_to_req(ref); } } /* Copy the new values back to userspace. */ if (!ret) { idx = i; do { ret = new_to_user(cs->controls + idx, helpers[idx].ref->ctrl); idx = helpers[idx].next; } while (!ret && idx); } v4l2_ctrl_unlock(master); } if (cs->count > ARRAY_SIZE(helper)) kvfree(helpers); return ret; } static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs, bool set) { int ret; if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set); ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); if (ret) dprintk(vdev, "%s: try_set_ext_ctrls_common failed (%d)\n", video_device_node_name(vdev), ret); return ret; } int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs) { return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false); } EXPORT_SYMBOL(v4l2_try_ext_ctrls); int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs) { return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true); } EXPORT_SYMBOL(v4l2_s_ext_ctrls); /* * VIDIOC_G/S_CTRL implementation */ /* Helper function to get a single control */ static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) { struct v4l2_ctrl *master = ctrl->cluster[0]; int ret = 0; int i; /* Compound controls are not supported. The new_to_user() and * cur_to_user() calls below would need to be modified not to access * userspace memory when called from get_ctrl(). 
*/ if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64) return -EINVAL; if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) return -EACCES; v4l2_ctrl_lock(master); /* g_volatile_ctrl will update the current control values */ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { for (i = 0; i < master->ncontrols; i++) cur_to_new(master->cluster[i]); ret = call_op(master, g_volatile_ctrl); if (!ret) ret = new_to_user(c, ctrl); } else { ret = cur_to_user(c, ctrl); } v4l2_ctrl_unlock(master); return ret; } int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); struct v4l2_ext_control c; int ret; if (!ctrl || !ctrl->is_int) return -EINVAL; ret = get_ctrl(ctrl, &c); if (!ret) control->value = c.value; return ret; } EXPORT_SYMBOL(v4l2_g_ctrl); /* Helper function for VIDIOC_S_CTRL compatibility */ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) { struct v4l2_ctrl *master = ctrl->cluster[0]; int ret; int i; /* Reset the 'is_new' flags of the cluster */ for (i = 0; i < master->ncontrols; i++) if (master->cluster[i]) master->cluster[i]->is_new = 0; ret = validate_new(ctrl, ctrl->p_new); if (ret) return ret; /* * For autoclusters with volatiles that are switched from auto to * manual mode we have to update the current volatile values since * those will become the initial manual values after such a switch. */ if (master->is_auto && master->has_volatiles && ctrl == master && !is_cur_manual(master) && ctrl->val == master->manual_mode_value) update_from_auto_cluster(master); ctrl->is_new = 1; return try_or_set_cluster(fh, master, true, ch_flags); } /* Helper function for VIDIOC_S_CTRL compatibility */ static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) { int ret; v4l2_ctrl_lock(ctrl); ret = user_to_new(c, ctrl); if (!ret) ret = set_ctrl(fh, ctrl, 0); if (!ret) ret = cur_to_user(c, ctrl); v4l2_ctrl_unlock(ctrl); return ret; } int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); struct v4l2_ext_control c = { control->id }; int ret; if (!ctrl || !ctrl->is_int) return -EINVAL; if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) return -EACCES; c.value = control->value; ret = set_ctrl_lock(fh, ctrl, &c); control->value = c.value; return ret; } EXPORT_SYMBOL(v4l2_s_ctrl); /* * Helper functions for drivers to get/set controls. */ s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_ext_control c; /* It's a driver bug if this happens. */ if (WARN_ON(!ctrl->is_int)) return 0; c.value = 0; get_ctrl(ctrl, &c); return c.value; } EXPORT_SYMBOL(v4l2_ctrl_g_ctrl); s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl) { struct v4l2_ext_control c; /* It's a driver bug if this happens. */ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) return 0; c.value64 = 0; get_ctrl(ctrl, &c); return c.value64; } EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64); int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) { lockdep_assert_held(ctrl->handler->lock); /* It's a driver bug if this happens. */ if (WARN_ON(!ctrl->is_int)) return -EINVAL; ctrl->val = val; return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl); int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) { lockdep_assert_held(ctrl->handler->lock); /* It's a driver bug if this happens. 
*/ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) return -EINVAL; *ctrl->p_new.p_s64 = val; return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64); int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) { lockdep_assert_held(ctrl->handler->lock); /* It's a driver bug if this happens. */ if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING)) return -EINVAL; strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1); return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, enum v4l2_ctrl_type type, const void *p) { lockdep_assert_held(ctrl->handler->lock); /* It's a driver bug if this happens. */ if (WARN_ON(ctrl->type != type)) return -EINVAL; /* Setting dynamic arrays is not (yet?) supported. */ if (WARN_ON(ctrl->is_dyn_array)) return -EINVAL; memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size); return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound); /* * Modify the range of a control. */ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, s64 min, s64 max, u64 step, s64 def) { bool value_changed; bool range_changed = false; int ret; lockdep_assert_held(ctrl->handler->lock); switch (ctrl->type) { case V4L2_CTRL_TYPE_INTEGER: case V4L2_CTRL_TYPE_INTEGER64: case V4L2_CTRL_TYPE_BOOLEAN: case V4L2_CTRL_TYPE_MENU: case V4L2_CTRL_TYPE_INTEGER_MENU: case V4L2_CTRL_TYPE_BITMASK: case V4L2_CTRL_TYPE_U8: case V4L2_CTRL_TYPE_U16: case V4L2_CTRL_TYPE_U32: if (ctrl->is_array) return -EINVAL; ret = check_range(ctrl->type, min, max, step, def); if (ret) return ret; break; default: return -EINVAL; } if (ctrl->minimum != min || ctrl->maximum != max || ctrl->step != step || ctrl->default_value != def) { range_changed = true; ctrl->minimum = min; ctrl->maximum = max; ctrl->step = step; ctrl->default_value = def; } cur_to_new(ctrl); if (validate_new(ctrl, ctrl->p_new)) { if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) *ctrl->p_new.p_s64 = def; else *ctrl->p_new.p_s32 = def; } if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64; else value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32; if (value_changed) ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); else if (range_changed) send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); return ret; } EXPORT_SYMBOL(__v4l2_ctrl_modify_range); int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, u32 dims[V4L2_CTRL_MAX_DIMS]) { unsigned int elems = 1; unsigned int i; void *p_array; lockdep_assert_held(ctrl->handler->lock); if (!ctrl->is_array || ctrl->is_dyn_array) return -EINVAL; for (i = 0; i < ctrl->nr_of_dims; i++) elems *= dims[i]; if (elems == 0) return -EINVAL; p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL); if (!p_array) return -ENOMEM; kvfree(ctrl->p_array); ctrl->p_array_alloc_elems = elems; ctrl->elems = elems; ctrl->new_elems = elems; ctrl->p_array = p_array; ctrl->p_new.p = p_array; ctrl->p_cur.p = p_array + elems * ctrl->elem_size; for (i = 0; i < ctrl->nr_of_dims; i++) ctrl->dims[i] = dims[i]; ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); cur_to_new(ctrl); send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE | V4L2_EVENT_CTRL_CH_DIMENSIONS); return 0; } EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions); /* Implement VIDIOC_QUERY_EXT_CTRL */ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc) { const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND; u32 id = qc->id & V4L2_CTRL_ID_MASK; struct 
v4l2_ctrl_ref *ref; struct v4l2_ctrl *ctrl; if (!hdl) return -EINVAL; mutex_lock(hdl->lock); /* Try to find it */ ref = find_ref(hdl, id); if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) { bool is_compound; /* Match any control that is not hidden */ unsigned int mask = 1; bool match = false; if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) { /* Match any hidden control */ match = true; } else if ((qc->id & next_flags) == next_flags) { /* Match any control, compound or not */ mask = 0; } /* Find the next control with ID > qc->id */ /* Did we reach the end of the control list? */ if (id >= node2id(hdl->ctrl_refs.prev)) { ref = NULL; /* Yes, so there is no next control */ } else if (ref) { struct v4l2_ctrl_ref *pos = ref; /* * We found a control with the given ID, so just get * the next valid one in the list. */ ref = NULL; list_for_each_entry_continue(pos, &hdl->ctrl_refs, node) { is_compound = pos->ctrl->is_array || pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; if (id < pos->ctrl->id && (is_compound & mask) == match) { ref = pos; break; } } } else { struct v4l2_ctrl_ref *pos; /* * No control with the given ID exists, so start * searching for the next largest ID. We know there * is one, otherwise the first 'if' above would have * been true. */ list_for_each_entry(pos, &hdl->ctrl_refs, node) { is_compound = pos->ctrl->is_array || pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; if (id < pos->ctrl->id && (is_compound & mask) == match) { ref = pos; break; } } } } mutex_unlock(hdl->lock); if (!ref) return -EINVAL; ctrl = ref->ctrl; memset(qc, 0, sizeof(*qc)); if (id >= V4L2_CID_PRIVATE_BASE) qc->id = id; else qc->id = ctrl->id; strscpy(qc->name, ctrl->name, sizeof(qc->name)); qc->flags = user_flags(ctrl); qc->type = ctrl->type; qc->elem_size = ctrl->elem_size; qc->elems = ctrl->elems; qc->nr_of_dims = ctrl->nr_of_dims; memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0])); qc->minimum = ctrl->minimum; qc->maximum = ctrl->maximum; qc->default_value = ctrl->default_value; if (ctrl->type == V4L2_CTRL_TYPE_MENU || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) qc->step = 1; else qc->step = ctrl->step; return 0; } EXPORT_SYMBOL(v4l2_query_ext_ctrl); void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to, const struct v4l2_query_ext_ctrl *from) { to->id = from->id; to->type = from->type; to->flags = from->flags; strscpy(to->name, from->name, sizeof(to->name)); switch (from->type) { case V4L2_CTRL_TYPE_INTEGER: case V4L2_CTRL_TYPE_BOOLEAN: case V4L2_CTRL_TYPE_MENU: case V4L2_CTRL_TYPE_INTEGER_MENU: case V4L2_CTRL_TYPE_STRING: case V4L2_CTRL_TYPE_BITMASK: to->minimum = from->minimum; to->maximum = from->maximum; to->step = from->step; to->default_value = from->default_value; break; default: to->minimum = 0; to->maximum = 0; to->step = 0; to->default_value = 0; break; } } EXPORT_SYMBOL(v4l2_query_ext_ctrl_to_v4l2_queryctrl); /* Implement VIDIOC_QUERYCTRL */ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc) { struct v4l2_query_ext_ctrl qec = { qc->id }; int rc; rc = v4l2_query_ext_ctrl(hdl, &qec); if (rc) return rc; v4l2_query_ext_ctrl_to_v4l2_queryctrl(qc, &qec); return 0; } EXPORT_SYMBOL(v4l2_queryctrl); /* Implement VIDIOC_QUERYMENU */ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) { struct v4l2_ctrl *ctrl; u32 i = qm->index; ctrl = v4l2_ctrl_find(hdl, qm->id); if (!ctrl) return -EINVAL; qm->reserved = 0; /* Sanity checks */ switch (ctrl->type) { case V4L2_CTRL_TYPE_MENU: if (!ctrl->qmenu) return -EINVAL; 
break; case V4L2_CTRL_TYPE_INTEGER_MENU: if (!ctrl->qmenu_int) return -EINVAL; break; default: return -EINVAL; } if (i < ctrl->minimum || i > ctrl->maximum) return -EINVAL; /* Use mask to see if this menu item should be skipped */ if (i < BITS_PER_LONG_LONG && (ctrl->menu_skip_mask & BIT_ULL(i))) return -EINVAL; /* Empty menu items should also be skipped */ if (ctrl->type == V4L2_CTRL_TYPE_MENU) { if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0') return -EINVAL; strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name)); } else { qm->value = ctrl->qmenu_int[i]; } return 0; } EXPORT_SYMBOL(v4l2_querymenu); /* * VIDIOC_LOG_STATUS helpers */ int v4l2_ctrl_log_status(struct file *file, void *priv) { struct video_device *vfd = video_devdata(file); if (vfd->v4l2_dev) { struct v4l2_fh *vfh = file_to_v4l2_fh(file); v4l2_ctrl_handler_log_status(vfh->ctrl_handler, vfd->v4l2_dev->name); } return 0; } EXPORT_SYMBOL(v4l2_ctrl_log_status); int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd) { v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name); return 0; } EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status); /* * VIDIOC_(UN)SUBSCRIBE_EVENT implementation */ static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned int elems) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); if (!ctrl) return -EINVAL; v4l2_ctrl_lock(ctrl); list_add_tail(&sev->node, &ctrl->ev_subs); if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS && (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) send_initial_event(sev->fh, ctrl); v4l2_ctrl_unlock(ctrl); return 0; } static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); if (!ctrl) return; v4l2_ctrl_lock(ctrl); list_del(&sev->node); v4l2_ctrl_unlock(ctrl); } void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new) { u32 old_changes = old->u.ctrl.changes; old->u.ctrl = new->u.ctrl; old->u.ctrl.changes |= old_changes; } EXPORT_SYMBOL(v4l2_ctrl_replace); void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new) { new->u.ctrl.changes |= old->u.ctrl.changes; } EXPORT_SYMBOL(v4l2_ctrl_merge); const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = { .add = v4l2_ctrl_add_event, .del = v4l2_ctrl_del_event, .replace = v4l2_ctrl_replace, .merge = v4l2_ctrl_merge, }; EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops); int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { if (sub->type == V4L2_EVENT_CTRL) return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops); return -EINVAL; } EXPORT_SYMBOL(v4l2_ctrl_subscribe_event); int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { if (!sd->ctrl_handler) return -EINVAL; return v4l2_ctrl_subscribe_event(fh, sub); } EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event); /* * poll helper */ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait) { struct v4l2_fh *fh = file_to_v4l2_fh(file); poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) return EPOLLPRI; return 0; } EXPORT_SYMBOL(v4l2_ctrl_poll);
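/*
 * Illustrative sketch, not part of the file above: how driver code might use
 * the helpers defined here. v4l2_ctrl_g_ctrl() takes the handler lock
 * internally, while __v4l2_ctrl_s_ctrl() asserts via lockdep that the caller
 * already holds it, so the unlocked setter must be wrapped in
 * v4l2_ctrl_lock()/v4l2_ctrl_unlock(). The function name and the "gain"
 * control below are hypothetical, used only for illustration.
 */
static int example_bump_gain(struct v4l2_ctrl *gain)
{
	s32 cur = v4l2_ctrl_g_ctrl(gain);	/* locks the handler itself */
	int ret;

	v4l2_ctrl_lock(gain);			/* required by __v4l2_ctrl_s_ctrl() */
	ret = __v4l2_ctrl_s_ctrl(gain, cur + 1);
	v4l2_ctrl_unlock(gain);
	return ret;
}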
/* * Header file for reservations for dma-buf and ttm * * Copyright(C) 2011 Linaro Limited. All rights reserved. * Copyright (C) 2012-2013 Canonical Ltd * Copyright (C) 2012 Texas Instruments * * Authors: * Rob Clark <robdclark@gmail.com> * Maarten Lankhorst <maarten.lankhorst@canonical.com> * Thomas Hellstrom <thellstrom-at-vmware-dot-com> * * Based on bo.c which bears the following copyright notice, * but is dual licensed: * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ #ifndef _LINUX_RESERVATION_H #define _LINUX_RESERVATION_H #include <linux/ww_mutex.h> #include <linux/dma-fence.h> #include <linux/slab.h> #include <linux/seqlock.h> #include <linux/rcupdate.h> extern struct ww_class reservation_ww_class; struct dma_resv_list; /** * enum dma_resv_usage - how the fences from a dma_resv obj are used * * This enum describes the different use cases for a dma_resv object and * controls which fences are returned when queried. * * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and * when the dma_resv object is asked for fences for one use case the fences * for the lower use cases are returned as well. * * For example when asking for WRITE fences then the KERNEL fences are returned * as well. Similarly, when asked for READ fences, both WRITE and KERNEL * fences are returned as well. * * Already used fences can be promoted in the sense that a fence with * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again * with this usage. But fences can never be degraded in the sense that a fence * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ. */ enum dma_resv_usage { /** * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only. * * This should only be used for things like copying or clearing memory * with a DMA hardware engine for the purpose of kernel memory * management. * * Drivers must *always* wait for those fences before accessing the * resource protected by the dma_resv object. The only exception for * that is when the resource is known to be locked down in place by * pinning it previously. */ DMA_RESV_USAGE_KERNEL, /** * @DMA_RESV_USAGE_WRITE: Implicit write synchronization. * * This should only be used for userspace command submissions which add * an implicit write dependency. */ DMA_RESV_USAGE_WRITE, /** * @DMA_RESV_USAGE_READ: Implicit read synchronization. * * This should only be used for userspace command submissions which add * an implicit read dependency. */ DMA_RESV_USAGE_READ, /** * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync. * * This should be used by submissions which don't want to participate in * any implicit synchronization. * * The most common cases are preemption fences, page table updates, TLB * flushes as well as explicitly synced user submissions. * * Explicitly synced user submissions can be promoted to * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using * dma_buf_import_sync_file() when implicit synchronization should * become necessary after initial adding of the fence. */ DMA_RESV_USAGE_BOOKKEEP }; /** * dma_resv_usage_rw - helper for implicit sync * @write: true if we create a new implicit sync write * * This returns the implicit synchronization usage for write or read accesses, * see enum dma_resv_usage and &dma_buf.resv. */ static inline enum dma_resv_usage dma_resv_usage_rw(bool write) { /* This looks confusing at first sight, but is indeed correct. * * The rationale is that new write operations need to wait for the * existing read and write operations to finish. * But a new read operation only needs to wait for the existing write * operations to finish. */ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE; } /** * struct dma_resv - a reservation object manages fences for a buffer * * This is a container for dma_fence objects which needs to handle multiple use * cases.
* * One use is to synchronize cross-driver access to a struct dma_buf, either for * dynamic buffer management or just to handle implicit synchronization between * different users of the buffer in userspace. See &dma_buf.resv for a more * in-depth discussion. * * The other major use is to manage access and locking within a driver in a * buffer based memory manager. struct ttm_buffer_object is the canonical * example here, since this is where reservation objects originated from. But * use in drivers is spreading and some drivers also manage struct * drm_gem_object with the same scheme. */ struct dma_resv { /** * @lock: * * Update side lock. Don't use directly, instead use the wrapper * functions like dma_resv_lock() and dma_resv_unlock(). * * Drivers which use the reservation object to manage memory dynamically * also use this lock to protect buffer object state like placement, * allocation policies or throughout command submission. */ struct ww_mutex lock; /** * @fences: * * Array of fences which were added to the dma_resv object * * A new fence is added by calling dma_resv_add_fence(). Since this * often needs to be done past the point of no return in command * submission it cannot fail, and therefore sufficient slots need to be * reserved by calling dma_resv_reserve_fences(). */ struct dma_resv_list __rcu *fences; }; /** * struct dma_resv_iter - current position into the dma_resv fences * * Don't touch this directly in the driver, use the accessor function instead. * * IMPORTANT * * When using the lockless iterators like dma_resv_iter_next_unlocked() or * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted. * Code which accumulates statistics or similar needs to check for this with * dma_resv_iter_is_restarted(). */ struct dma_resv_iter { /** @obj: The dma_resv object we iterate over */ struct dma_resv *obj; /** @usage: Return fences with this usage or lower. */ enum dma_resv_usage usage; /** @fence: the currently handled fence */ struct dma_fence *fence; /** @fence_usage: the usage of the current fence */ enum dma_resv_usage fence_usage; /** @index: index into the shared fences */ unsigned int index; /** @fences: the shared fences; private, *MUST* not dereference */ struct dma_resv_list *fences; /** @num_fences: number of fences */ unsigned int num_fences; /** @is_restarted: true if this is the first returned fence */ bool is_restarted; }; struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor); struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor); struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor); struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor); /** * dma_resv_iter_begin - initialize a dma_resv_iter object * @cursor: The dma_resv_iter object to initialize * @obj: The dma_resv object which we want to iterate over * @usage: controls which fences to include, see enum dma_resv_usage. */ static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor, struct dma_resv *obj, enum dma_resv_usage usage) { cursor->obj = obj; cursor->usage = usage; cursor->fence = NULL; } /** * dma_resv_iter_end - cleanup a dma_resv_iter object * @cursor: the dma_resv_iter object which should be cleaned up * * Make sure that the reference to the fence in the cursor is properly * dropped.
*/ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor) { dma_fence_put(cursor->fence); } /** * dma_resv_iter_usage - Return the usage of the current fence * @cursor: the cursor of the current position * * Returns the usage of the currently processed fence. */ static inline enum dma_resv_usage dma_resv_iter_usage(struct dma_resv_iter *cursor) { return cursor->fence_usage; } /** * dma_resv_iter_is_restarted - test if this is the first fence after a restart * @cursor: the cursor with the current position * * Return true if this is the first fence in an iteration after a restart. */ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) { return cursor->is_restarted; } /** * dma_resv_for_each_fence_unlocked - unlocked fence iterator * @cursor: a struct dma_resv_iter pointer * @fence: the current fence * * Iterate over the fences in a struct dma_resv object without holding the * &dma_resv.lock and using RCU instead. The cursor needs to be initialized * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside * the iterator a reference to the dma_fence is held and the RCU lock dropped. * * Beware that the iterator can be restarted when the struct dma_resv for * @cursor is modified. Code which accumulates statistics or similar needs to * check for this with dma_resv_iter_is_restarted(). For this reason prefer the * lock iterator dma_resv_for_each_fence() whenever possible. */ #define dma_resv_for_each_fence_unlocked(cursor, fence) \ for (fence = dma_resv_iter_first_unlocked(cursor); \ fence; fence = dma_resv_iter_next_unlocked(cursor)) /** * dma_resv_for_each_fence - fence iterator * @cursor: a struct dma_resv_iter pointer * @obj: a dma_resv object pointer * @usage: controls which fences to return * @fence: the current fence * * Iterate over the fences in a struct dma_resv object while holding the * &dma_resv.lock. @usage controls which fences are returned. The cursor * initialisation is part of the iterator and the fence stays valid as long as * the lock is held and so no extra reference to the fence is taken. */ #define dma_resv_for_each_fence(cursor, obj, usage, fence) \ for (dma_resv_iter_begin(cursor, obj, usage), \ fence = dma_resv_iter_first(cursor); fence; \ fence = dma_resv_iter_next(cursor)) #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) #ifdef CONFIG_DEBUG_MUTEXES void dma_resv_reset_max_fences(struct dma_resv *obj); #else static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {} #endif /** * dma_resv_lock - lock the reservation object * @obj: the reservation object * @ctx: the locking context * * Locks the reservation object for exclusive access and modification. Note * that the lock is only against other writers; readers will run concurrently * with a writer under RCU. The seqlock is used to notify readers if they * overlap with a writer. * * As the reservation object may be locked by multiple parties in an * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. * * When a die situation is indicated by returning -EDEADLK all locks held by * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj. * * Unlocked by calling dma_resv_unlock(). * * See also dma_resv_lock_interruptible() for the interruptible variant.
*/ static inline int dma_resv_lock(struct dma_resv *obj, struct ww_acquire_ctx *ctx) { return ww_mutex_lock(&obj->lock, ctx); } /** * dma_resv_lock_interruptible - lock the reservation object * @obj: the reservation object * @ctx: the locking context * * Locks the reservation object interruptible for exclusive access and * modification. Note that the lock is only against other writers; readers * will run concurrently with a writer under RCU. The seqlock is used to * notify readers if they overlap with a writer. * * As the reservation object may be locked by multiple parties in an * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. * * When a die situation is indicated by returning -EDEADLK all locks held by * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on * @obj. * * Unlocked by calling dma_resv_unlock(). */ static inline int dma_resv_lock_interruptible(struct dma_resv *obj, struct ww_acquire_ctx *ctx) { return ww_mutex_lock_interruptible(&obj->lock, ctx); } /** * dma_resv_lock_slow - slowpath lock the reservation object * @obj: the reservation object * @ctx: the locking context * * Acquires the reservation object after a die case. This function * will sleep until the lock becomes available. See dma_resv_lock() as * well. * * See also dma_resv_lock_slow_interruptible() for the interruptible variant. */ static inline void dma_resv_lock_slow(struct dma_resv *obj, struct ww_acquire_ctx *ctx) { ww_mutex_lock_slow(&obj->lock, ctx); } /** * dma_resv_lock_slow_interruptible - slowpath lock the reservation * object, interruptible * @obj: the reservation object * @ctx: the locking context * * Acquires the reservation object interruptible after a die case. This function * will sleep until the lock becomes available. See * dma_resv_lock_interruptible() as well. */ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, struct ww_acquire_ctx *ctx) { return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); } /** * dma_resv_trylock - trylock the reservation object * @obj: the reservation object * * Tries to lock the reservation object for exclusive access and modification. * Note that the lock is only against other writers; readers will run * concurrently with a writer under RCU. The seqlock is used to notify readers * if they overlap with a writer. * * Also note that since no context is provided, no deadlock protection is * possible, which is also not needed for a trylock. * * Returns true if the lock was acquired, false otherwise. */ static inline bool __must_check dma_resv_trylock(struct dma_resv *obj) { return ww_mutex_trylock(&obj->lock, NULL); } /** * dma_resv_is_locked - is the reservation object locked * @obj: the reservation object * * Returns true if the mutex is locked, false if unlocked. */ static inline bool dma_resv_is_locked(struct dma_resv *obj) { return ww_mutex_is_locked(&obj->lock); } /** * dma_resv_locking_ctx - returns the context used to lock the object * @obj: the reservation object * * Returns the context used to lock a reservation object or NULL if no context * was used or the object is not locked at all. * * WARNING: This interface is pretty horrible, but TTM needs it because it * doesn't pass the struct ww_acquire_ctx around in some very long callchains. * Everyone else just uses it to check whether they're holding a reservation or * not.
*/ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) { return READ_ONCE(obj->lock.ctx); } /** * dma_resv_unlock - unlock the reservation object * @obj: the reservation object * * Unlocks the reservation object following exclusive access. */ static inline void dma_resv_unlock(struct dma_resv *obj) { dma_resv_reset_max_fences(obj); ww_mutex_unlock(&obj->lock); } void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, enum dma_resv_usage usage); void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, struct dma_fence *fence, enum dma_resv_usage usage); int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, unsigned int *num_fences, struct dma_fence ***fences); int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, struct dma_fence **fence); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, bool intr, unsigned long timeout); void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage, ktime_t deadline); bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage); void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq); #endif /* _LINUX_RESERVATION_H */
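/*
 * Illustrative sketches, not part of the header above, showing the usage its
 * comments describe. dma_resv_add_fence() cannot fail past the point of no
 * return, so a slot is reserved up front; passing NULL as @ctx is the
 * documented single-object locking case. The function names are hypothetical.
 */
static int example_publish_write_fence(struct dma_resv *obj,
				       struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);		/* no ww ctx: single object */
	if (ret)
		return ret;
	ret = dma_resv_reserve_fences(obj, 1);	/* make add_fence infallible */
	if (!ret)
		dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
	dma_resv_unlock(obj);
	return ret;
}

/* Count fences with READ usage or lower (KERNEL, WRITE, READ) under the lock. */
static unsigned int example_count_fences(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int n = 0;

	dma_resv_assert_held(obj);
	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence)
		n++;
	return n;
}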
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2000-2002 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001-2002, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/circ_buf.h> #include <linux/mutex.h> /* This module provides arbitrary resource management routines. * I use it to manage the device's memory. * Despite the name of this module, I am *not* going to access the ioports. */ #include <linux/ioport.h> #define IFORCE_MAX_LENGTH 16 #define IFORCE_EFFECTS_MAX 32 /* Each force feedback effect is made of one core effect, which can be * associated with at most two effect modifiers */ #define FF_MOD1_IS_USED 0 #define FF_MOD2_IS_USED 1 #define FF_CORE_IS_USED 2 #define FF_CORE_IS_PLAYED 3 /* Effect is currently being played */ #define FF_CORE_SHOULD_PLAY 4 /* User wants the effect to be played */ #define FF_CORE_UPDATE 5 /* Effect is being updated */ #define FF_MODCORE_CNT 6 struct iforce_core_effect { /* Information about where modifiers are stored in the device's memory */ struct resource mod1_chunk; struct resource mod2_chunk; unsigned long flags[BITS_TO_LONGS(FF_MODCORE_CNT)]; }; #define FF_CMD_EFFECT 0x010e #define FF_CMD_ENVELOPE 0x0208 #define FF_CMD_MAGNITUDE 0x0303 #define FF_CMD_PERIOD 0x0407 #define FF_CMD_CONDITION 0x050a #define FF_CMD_AUTOCENTER 0x4002 #define FF_CMD_PLAY 0x4103 #define FF_CMD_ENABLE 0x4201 #define FF_CMD_GAIN 0x4301 #define FF_CMD_QUERY 0xff01 /* Buffer for async write */ #define XMIT_SIZE 256 #define XMIT_INC(var, n) (var)+=n; (var)&= XMIT_SIZE -1 /* iforce::xmit_flags */ #define IFORCE_XMIT_RUNNING 0 #define IFORCE_XMIT_AGAIN 1 struct iforce_device { u16 idvendor; u16 idproduct; char *name; signed short *btn; signed short *abs; signed short *ff; }; struct iforce; struct iforce_xport_ops { void (*xmit)(struct iforce *iforce); int (*get_id)(struct iforce *iforce, u8 id, u8 *response_data, size_t *response_len); int (*start_io)(struct iforce *iforce); void (*stop_io)(struct iforce *iforce); }; struct iforce { struct input_dev *dev; /* Input device interface */ struct iforce_device *type; const struct iforce_xport_ops *xport_ops; spinlock_t xmit_lock; /* Buffer used for asynchronous sending of bytes to the device */ struct circ_buf xmit; unsigned char xmit_data[XMIT_SIZE]; unsigned long xmit_flags[1]; /* Force Feedback */ wait_queue_head_t wait; struct resource device_memory; struct iforce_core_effect core_effects[IFORCE_EFFECTS_MAX]; struct mutex mem_mutex; }; /* Get hi and low bytes of a 16-bit int */ #define HI(a) ((unsigned char)((a) >> 8)) #define LO(a) ((unsigned char)((a) & 0xff)) /* For many parameters, it seems that 0x80 is a special value that should * be avoided. Instead, we replace this value by 0x7f */ #define HIFIX80(a) ((unsigned char)(((a)<0?
(a)+255 : (a))>>8)) /* Encode a time value */ #define TIME_SCALE(a) (a) static inline int iforce_get_id_packet(struct iforce *iforce, u8 id, u8 *response_data, size_t *response_len) { return iforce->xport_ops->get_id(iforce, id, response_data, response_len); } static inline void iforce_clear_xmit_and_wake(struct iforce *iforce) { clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); wake_up_all(&iforce->wait); } /* Public functions */ /* iforce-main.c */ int iforce_init_device(struct device *parent, u16 bustype, struct iforce *iforce); /* iforce-packets.c */ int iforce_control_playback(struct iforce*, u16 id, unsigned int); void iforce_process_packet(struct iforce *iforce, u8 packet_id, u8 *data, size_t len); int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data); void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd, unsigned char *data); /* iforce-ff.c */ int iforce_upload_periodic(struct iforce *, struct ff_effect *, struct ff_effect *); int iforce_upload_constant(struct iforce *, struct ff_effect *, struct ff_effect *); int iforce_upload_condition(struct iforce *, struct ff_effect *, struct ff_effect *); /* Public variables */ extern struct serio_driver iforce_serio_drv; extern struct usb_driver iforce_usb_driver;
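/*
 * Illustrative sketch, not part of the header above: XMIT_INC() advances an
 * index into the xmit circular buffer, relying on XMIT_SIZE being a power of
 * two so that masking with XMIT_SIZE - 1 wraps the index (a cheap modulo).
 * Note also that HI()/LO() split the FF_CMD_* words, which appear to pack an
 * opcode in the high byte and a data length in the low byte (e.g.
 * FF_CMD_PLAY == 0x4103: opcode 0x41, 3 data bytes). The helper name below is
 * hypothetical.
 */
static inline void example_advance_head(struct circ_buf *xmit, int n)
{
	int head = xmit->head;

	XMIT_INC(head, n);	/* head = (head + n) & (XMIT_SIZE - 1) */
	xmit->head = head;
}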
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Cloudflare Ltd https://cloudflare.com */ #include <linux/skmsg.h> #include <net/sock.h> #include <net/udp.h> #include <net/inet_common.h> #include "udp_impl.h" static struct proto *udpv6_prot_saved __read_mostly; static int sk_udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) return udpv6_prot_saved->recvmsg(sk, msg, len, flags, addr_len); #endif return udp_prot.recvmsg(sk, msg, len, flags, addr_len); } static bool udp_sk_has_data(struct sock *sk) { return !skb_queue_empty(&udp_sk(sk)->reader_queue) || !skb_queue_empty(&sk->sk_receive_queue); } static bool psock_has_data(struct sk_psock *psock) { return !skb_queue_empty(&psock->ingress_skb) || !sk_psock_queue_empty(psock); } #define udp_msg_has_data(__sk, __psock) \ ({ udp_sk_has_data(__sk) || psock_has_data(__psock); }) static int udp_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int ret = 0; if (sk->sk_shutdown & RCV_SHUTDOWN) return 1; if (!timeo) return ret; add_wait_queue(sk_sleep(sk), &wait); sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); ret = udp_msg_has_data(sk, psock); if (!ret) { wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); ret = udp_msg_has_data(sk, psock); } sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); remove_wait_queue(sk_sleep(sk), &wait); return ret; } static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct sk_psock *psock; int copied, ret; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); if (!len) return 0; psock = sk_psock_get(sk); if (unlikely(!psock)) return sk_udp_recvmsg(sk, msg, len, flags, addr_len); if (!psock_has_data(psock)) { ret = sk_udp_recvmsg(sk, msg, len, flags, addr_len); goto out; } msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); if (!copied) { long timeo; int data; timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); data = udp_msg_wait_data(sk, psock, timeo); if (data) { if (psock_has_data(psock)) goto msg_bytes_ready; ret = sk_udp_recvmsg(sk, msg, len, flags, addr_len); goto out; } copied = -EAGAIN; } ret = copied; out: sk_psock_put(sk, psock); return ret; } enum { UDP_BPF_IPV4, UDP_BPF_IPV6, UDP_BPF_NUM_PROTS, }; static DEFINE_SPINLOCK(udpv6_prot_lock); static struct proto udp_bpf_prots[UDP_BPF_NUM_PROTS]; static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base) { *prot = *base; prot->close = sock_map_close; prot->recvmsg = udp_bpf_recvmsg; prot->sock_is_readable = sk_msg_is_readable; } static void udp_bpf_check_v6_needs_rebuild(struct proto *ops) { if (unlikely(ops != smp_load_acquire(&udpv6_prot_saved))) { spin_lock_bh(&udpv6_prot_lock); if (likely(ops != udpv6_prot_saved)) { udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV6], ops); smp_store_release(&udpv6_prot_saved, ops); } spin_unlock_bh(&udpv6_prot_lock); } }
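/*
 * The v6 check above is double-checked locking: the cheap smp_load_acquire()
 * test skips the spinlock on the fast path, and the test is repeated under
 * udpv6_prot_lock so the IPv6 proto table is rebuilt and published (via
 * smp_store_release()) exactly once per proto pointer.
 */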
static int __init udp_bpf_v4_build_proto(void) { udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot); return 0; } late_initcall(udp_bpf_v4_build_proto); int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6; if (restore) { sk->sk_write_space = psock->saved_write_space; sock_replace_proto(sk, psock->sk_proto); return 0; } if (sk->sk_family == AF_INET6) udp_bpf_check_v6_needs_rebuild(psock->sk_proto); sock_replace_proto(sk, &udp_bpf_prots[family]); return 0; } EXPORT_SYMBOL_GPL(udp_bpf_update_proto);
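/*
 * Hedged sketch, not part of the file above: the proto-override pattern this
 * file is built around, in miniature. The override starts as a copy of the
 * stock proto so every handler that is not explicitly patched falls through
 * to the original implementation; only close, recvmsg and sock_is_readable
 * are replaced above. The names here are hypothetical.
 */
static struct proto example_prot;

static void example_build_proto(const struct proto *base)
{
	example_prot = *base;			/* inherit the stock ops */
	example_prot.recvmsg = udp_bpf_recvmsg;	/* patch only what changes */
}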
// SPDX-License-Identifier: GPL-2.0-or-later /* * NetLabel Network Address Lists * * This file contains network address list functions used to manage ordered * lists of network addresses for use by the NetLabel subsystem. The NetLabel * system manages static and dynamic label mappings for network protocols such * as CIPSO and RIPSO. * * Author: Paul Moore <paul@paul-moore.com> */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2008 */ #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/audit.h> #include "netlabel_addrlist.h" /* * Address List Functions */ /** * netlbl_af4list_search - Search for a matching IPv4 address entry * @addr: IPv4 address * @head: the list head * * Description: * Searches the IPv4 address list given by @head. If a matching address entry * is found it is returned, otherwise NULL is returned. The caller is * responsible for calling the rcu_read_[un]lock() functions. * */ struct netlbl_af4list *netlbl_af4list_search(__be32 addr, struct list_head *head) { struct netlbl_af4list *iter; list_for_each_entry_rcu(iter, head, list) if (iter->valid && (addr & iter->mask) == iter->addr) return iter; return NULL; } /** * netlbl_af4list_search_exact - Search for an exact IPv4 address entry * @addr: IPv4 address * @mask: IPv4 address mask * @head: the list head * * Description: * Searches the IPv4 address list given by @head. If an exact match is found * it is returned, otherwise NULL is returned. The caller is responsible for * calling the rcu_read_[un]lock() functions.
 *
 */
struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr,
						   __be32 mask,
						   struct list_head *head)
{
	struct netlbl_af4list *iter;

	list_for_each_entry_rcu(iter, head, list)
		if (iter->valid && iter->addr == addr && iter->mask == mask)
			return iter;

	return NULL;
}

#if IS_ENABLED(CONFIG_IPV6)
/**
 * netlbl_af6list_search - Search for a matching IPv6 address entry
 * @addr: IPv6 address
 * @head: the list head
 *
 * Description:
 * Searches the IPv6 address list given by @head. If a matching address entry
 * is found it is returned, otherwise NULL is returned. The caller is
 * responsible for calling the rcu_read_[un]lock() functions.
 *
 */
struct netlbl_af6list *netlbl_af6list_search(const struct in6_addr *addr,
					     struct list_head *head)
{
	struct netlbl_af6list *iter;

	list_for_each_entry_rcu(iter, head, list)
		if (iter->valid &&
		    ipv6_masked_addr_cmp(&iter->addr, &iter->mask, addr) == 0)
			return iter;

	return NULL;
}

/**
 * netlbl_af6list_search_exact - Search for an exact IPv6 address entry
 * @addr: IPv6 address
 * @mask: IPv6 address mask
 * @head: the list head
 *
 * Description:
 * Searches the IPv6 address list given by @head. If an exact match is found
 * it is returned, otherwise NULL is returned. The caller is responsible for
 * calling the rcu_read_[un]lock() functions.
 *
 */
struct netlbl_af6list *netlbl_af6list_search_exact(const struct in6_addr *addr,
						   const struct in6_addr *mask,
						   struct list_head *head)
{
	struct netlbl_af6list *iter;

	list_for_each_entry_rcu(iter, head, list)
		if (iter->valid &&
		    ipv6_addr_equal(&iter->addr, addr) &&
		    ipv6_addr_equal(&iter->mask, mask))
			return iter;

	return NULL;
}
#endif /* IPv6 */

/**
 * netlbl_af4list_add - Add a new IPv4 address entry to a list
 * @entry: address entry
 * @head: the list head
 *
 * Description:
 * Add a new address entry to the list pointed to by @head. On success zero is
 * returned, otherwise a negative value is returned. The caller is responsible
 * for calling the necessary locking functions.
 *
 */
int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head)
{
	struct netlbl_af4list *iter;

	iter = netlbl_af4list_search(entry->addr, head);
	if (iter != NULL &&
	    iter->addr == entry->addr && iter->mask == entry->mask)
		return -EEXIST;

	/* in order to speed up address searches through the list (the common
	 * case) we need to keep the list in order based on the size of the
	 * address mask such that the entry with the widest mask (smallest
	 * numerical value) appears first in the list */
	list_for_each_entry_rcu(iter, head, list)
		if (iter->valid &&
		    ntohl(entry->mask) > ntohl(iter->mask)) {
			__list_add_rcu(&entry->list,
				       iter->list.prev,
				       &iter->list);
			return 0;
		}
	list_add_tail_rcu(&entry->list, head);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/**
 * netlbl_af6list_add - Add a new IPv6 address entry to a list
 * @entry: address entry
 * @head: the list head
 *
 * Description:
 * Add a new address entry to the list pointed to by @head. On success zero is
 * returned, otherwise a negative value is returned. The caller is responsible
 * for calling the necessary locking functions.
 *
 */
int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head)
{
	struct netlbl_af6list *iter;

	iter = netlbl_af6list_search(&entry->addr, head);
	if (iter != NULL &&
	    ipv6_addr_equal(&iter->addr, &entry->addr) &&
	    ipv6_addr_equal(&iter->mask, &entry->mask))
		return -EEXIST;

	/* in order to speed up address searches through the list (the common
	 * case) we need to keep the list in order based on the size of the
	 * address mask such that the entry with the widest mask (smallest
	 * numerical value) appears first in the list */
	list_for_each_entry_rcu(iter, head, list)
		if (iter->valid &&
		    ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) {
			__list_add_rcu(&entry->list,
				       iter->list.prev,
				       &iter->list);
			return 0;
		}
	list_add_tail_rcu(&entry->list, head);
	return 0;
}
#endif /* IPv6 */

/**
 * netlbl_af4list_remove_entry - Remove an IPv4 address entry
 * @entry: address entry
 *
 * Description:
 * Remove the specified IP address entry. The caller is responsible for
 * calling the necessary locking functions.
 *
 */
void netlbl_af4list_remove_entry(struct netlbl_af4list *entry)
{
	entry->valid = 0;
	list_del_rcu(&entry->list);
}

/**
 * netlbl_af4list_remove - Remove an IPv4 address entry
 * @addr: IP address
 * @mask: IP address mask
 * @head: the list head
 *
 * Description:
 * Remove an IP address entry from the list pointed to by @head. Returns the
 * entry on success, NULL on failure. The caller is responsible for calling
 * the necessary locking functions.
 *
 */
struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
					     struct list_head *head)
{
	struct netlbl_af4list *entry;

	entry = netlbl_af4list_search_exact(addr, mask, head);
	if (entry == NULL)
		return NULL;
	netlbl_af4list_remove_entry(entry);
	return entry;
}

#if IS_ENABLED(CONFIG_IPV6)
/**
 * netlbl_af6list_remove_entry - Remove an IPv6 address entry
 * @entry: address entry
 *
 * Description:
 * Remove the specified IP address entry. The caller is responsible for
 * calling the necessary locking functions.
 *
 */
void netlbl_af6list_remove_entry(struct netlbl_af6list *entry)
{
	entry->valid = 0;
	list_del_rcu(&entry->list);
}

/**
 * netlbl_af6list_remove - Remove an IPv6 address entry
 * @addr: IP address
 * @mask: IP address mask
 * @head: the list head
 *
 * Description:
 * Remove an IP address entry from the list pointed to by @head. Returns the
 * entry on success, NULL on failure. The caller is responsible for calling
 * the necessary locking functions.
 *
 */
struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
					     const struct in6_addr *mask,
					     struct list_head *head)
{
	struct netlbl_af6list *entry;

	entry = netlbl_af6list_search_exact(addr, mask, head);
	if (entry == NULL)
		return NULL;
	netlbl_af6list_remove_entry(entry);
	return entry;
}
#endif /* IPv6 */

/*
 * Audit Helper Functions
 */

#ifdef CONFIG_AUDIT
/**
 * netlbl_af4list_audit_addr - Audit an IPv4 address
 * @audit_buf: audit buffer
 * @src: true if source address, false if destination
 * @dev: network interface
 * @addr: IP address
 * @mask: IP address mask
 *
 * Description:
 * Write the IPv4 address and address mask, if necessary, to @audit_buf.
 *
 */
void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
			       int src, const char *dev,
			       __be32 addr, __be32 mask)
{
	u32 mask_val = ntohl(mask);
	char *dir = (src ? "src" : "dst");
"src" : "dst"); if (dev != NULL) audit_log_format(audit_buf, " netif=%s", dev); audit_log_format(audit_buf, " %s=%pI4", dir, &addr); if (mask_val != 0xffffffff) { u32 mask_len = 0; while (mask_val > 0) { mask_val <<= 1; mask_len++; } audit_log_format(audit_buf, " %s_prefixlen=%d", dir, mask_len); } } #if IS_ENABLED(CONFIG_IPV6) /** * netlbl_af6list_audit_addr - Audit an IPv6 address * @audit_buf: audit buffer * @src: true if source address, false if destination * @dev: network interface * @addr: IP address * @mask: IP address mask * * Description: * Write the IPv6 address and address mask, if necessary, to @audit_buf. * */ void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, const struct in6_addr *addr, const struct in6_addr *mask) { char *dir = (src ? "src" : "dst"); if (dev != NULL) audit_log_format(audit_buf, " netif=%s", dev); audit_log_format(audit_buf, " %s=%pI6", dir, addr); if (ntohl(mask->s6_addr32[3]) != 0xffffffff) { u32 mask_len = 0; u32 mask_val; int iter = -1; while (ntohl(mask->s6_addr32[++iter]) == 0xffffffff) mask_len += 32; mask_val = ntohl(mask->s6_addr32[iter]); while (mask_val > 0) { mask_val <<= 1; mask_len++; } audit_log_format(audit_buf, " %s_prefixlen=%d", dir, mask_len); } } #endif /* IPv6 */ #endif /* CONFIG_AUDIT */
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "nop.h"

struct io_nop {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct file	*file;
	int		result;
	int		fd;
	unsigned int	flags;
	__u64		extra1;
	__u64		extra2;
};

#define NOP_FLAGS	(IORING_NOP_INJECT_RESULT | IORING_NOP_FIXED_FILE | \
			 IORING_NOP_FIXED_BUFFER | IORING_NOP_FILE | \
			 IORING_NOP_TW | IORING_NOP_CQE32)

int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_nop *nop = io_kiocb_to_cmd(req, struct io_nop);

	nop->flags = READ_ONCE(sqe->nop_flags);
	if (nop->flags & ~NOP_FLAGS)
		return -EINVAL;

	if (nop->flags & IORING_NOP_INJECT_RESULT)
		nop->result = READ_ONCE(sqe->len);
	else
		nop->result = 0;
	if (nop->flags & IORING_NOP_FILE)
		nop->fd = READ_ONCE(sqe->fd);
	else
		nop->fd = -1;
	if (nop->flags & IORING_NOP_FIXED_BUFFER)
		req->buf_index = READ_ONCE(sqe->buf_index);
	if (nop->flags & IORING_NOP_CQE32) {
		struct io_ring_ctx *ctx = req->ctx;

		if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
			return -EINVAL;
		nop->extra1 = READ_ONCE(sqe->off);
		nop->extra2 = READ_ONCE(sqe->addr);
	}
	return 0;
}

int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_nop *nop = io_kiocb_to_cmd(req, struct io_nop);
	int ret = nop->result;

	if (nop->flags & IORING_NOP_FILE) {
		if (nop->flags & IORING_NOP_FIXED_FILE) {
			req->file = io_file_get_fixed(req, nop->fd, issue_flags);
			req->flags |= REQ_F_FIXED_FILE;
		} else {
			req->file = io_file_get_normal(req, nop->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
	}
	if (nop->flags & IORING_NOP_FIXED_BUFFER) {
		if (!io_find_buf_node(req, issue_flags))
			ret = -EFAULT;
	}
done:
	if (ret < 0)
		req_set_fail(req);
	if (nop->flags & IORING_NOP_CQE32)
		io_req_set_res32(req, nop->result, 0, nop->extra1, nop->extra2);
	else
		io_req_set_res(req, nop->result, 0);
	if (nop->flags & IORING_NOP_TW) {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
		return IOU_ISSUE_SKIP_COMPLETE;
	}
	return IOU_COMPLETE;
}
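io_nop_prep() and io_nop() are reachable from userspace with a plain NOP SQE. A rough liburing sketch of IORING_NOP_INJECT_RESULT, assuming liburing and uapi headers new enough to expose sqe->nop_flags; on older kernels io_nop_prep() rejects the unknown flag with -EINVAL:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->nop_flags = IORING_NOP_INJECT_RESULT;	/* ask io_nop() to echo len */
	sqe->len = 42;					/* becomes cqe->res */

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("nop result: %d\n", cqe->res);	/* 42 when supported */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}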
// SPDX-License-Identifier: MIT

#include <linux/export.h>
#include <linux/fb.h>

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>

/*
 * struct fb_ops
 */

static int drm_fbdev_shmem_fb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* No need to take a ref for fbcon because it unbinds on unregister */
	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
		return -ENODEV;

	return 0;
}

static int drm_fbdev_shmem_fb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (user)
		module_put(fb_helper->dev->driver->fops->owner);

	return 0;
}

FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(drm_fbdev_shmem,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);

static int drm_fbdev_shmem_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, 0);
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return fb_deferred_io_mmap(info, vma);
}

static void drm_fbdev_shmem_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (!fb_helper->dev)
		return;

	fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);

	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static const struct fb_ops drm_fbdev_shmem_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_shmem_fb_open,
	.fb_release = drm_fbdev_shmem_fb_release,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_shmem),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_shmem),
	.fb_mmap = drm_fbdev_shmem_fb_mmap,
	.fb_destroy = drm_fbdev_shmem_fb_destroy,
};

static struct page *drm_fbdev_shmem_get_page(struct fb_info *info, unsigned long offset)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, 0);
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	unsigned int i = offset >> PAGE_SHIFT;
	struct page *page;

	if (fb_WARN_ON_ONCE(info, offset > obj->size))
		return NULL;

	page = shmem->pages[i]; // protected by active vmap
	if (page)
		get_page(page);
	fb_WARN_ON_ONCE(info, !page);

	return page;
}

/*
 * struct drm_fb_helper
 */

static int drm_fbdev_shmem_helper_fb_dirty(struct drm_fb_helper *helper,
					   struct drm_clip_rect *clip)
{
	struct drm_device *dev = helper->dev;
	int ret;

	/* Call damage handlers only if necessary */
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->fb->funcs->dirty) {
		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
			return ret;
	}

	return 0;
}

static const struct drm_fb_helper_funcs drm_fbdev_shmem_helper_funcs = {
	.fb_dirty = drm_fbdev_shmem_helper_fb_dirty,
};

/*
 * struct drm_driver
 */

int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				       struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_gem_shmem_object *shmem;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height, sizes->surface_bpp);

	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	shmem = to_drm_gem_shmem_obj(buffer->gem);

	fb = buffer->fb;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->funcs = &drm_fbdev_shmem_helper_funcs;
	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fbops = &drm_fbdev_shmem_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (!shmem->map_wc)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	info->fix.smem_len = info->screen_size;

	/* deferred I/O */
	fb_helper->fbdefio.delay = HZ / 20;
	fb_helper->fbdefio.get_page = drm_fbdev_shmem_get_page;
	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

	info->fbdefio = &fb_helper->fbdefio;
	ret = fb_deferred_io_init(info);
	if (ret)
		goto err_drm_fb_helper_release_info;

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe);
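Drivers do not call drm_fbdev_shmem_driver_fbdev_probe() directly; they point struct drm_driver::fbdev_probe at it, usually through the DRM_FBDEV_SHMEM_DRIVER_OPS macro from <drm/drm_fbdev_shmem.h>. A hedged sketch of that wiring for a hypothetical shmem-backed driver (the "example" names are mine):

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_SHMEM_DRIVER_OPS,	/* dumb-buffer and PRIME import hooks */
	DRM_FBDEV_SHMEM_DRIVER_OPS,	/* .fbdev_probe = drm_fbdev_shmem_driver_fbdev_probe */
	.name	= "example",
	.desc	= "Hypothetical shmem-backed driver",
};

After drm_dev_register(), starting the generic client (for instance via drm_client_setup(dev, NULL)) is what eventually invokes the probe above to build the fbdev emulation.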
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/sch_generic.c Generic packet scheduler routines. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * Jamal Hadi Salim, <hadi@cyberus.ca> 990601 * - Ingress support */ #include <linux/bitops.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/if_vlan.h> #include <linux/skb_array.h> #include <linux/if_macvlan.h> #include <linux/bpf.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> #include <net/dst.h> #include <net/hotdata.h> #include <trace/events/qdisc.h> #include <trace/events/net.h> #include <net/xfrm.h> /* Qdisc to use by default */ const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; EXPORT_SYMBOL(default_qdisc_ops); static void qdisc_maybe_clear_missed(struct Qdisc *q, const struct netdev_queue *txq) { clear_bit(__QDISC_STATE_MISSED, &q->state); /* Make sure the below netif_xmit_frozen_or_stopped() * checking happens after clearing STATE_MISSED. */ smp_mb__after_atomic(); /* Checking netif_xmit_frozen_or_stopped() again to * make sure STATE_MISSED is set if the STATE_MISSED * set by netif_tx_wake_queue()'s rescheduling of * net_tx_action() is cleared by the above clear_bit(). */ if (!netif_xmit_frozen_or_stopped(txq)) set_bit(__QDISC_STATE_MISSED, &q->state); else set_bit(__QDISC_STATE_DRAINING, &q->state); } /* Main transmission queue. */ /* Modifications to data participating in scheduling must be protected with * qdisc_lock(qdisc) spinlock. * * The idea is the following: * - enqueue, dequeue are serialized via qdisc root lock * - ingress filtering is also serialized via qdisc root lock * - updates to tree and tree walking are only done under the rtnl mutex. 
*/ #define SKB_XOFF_MAGIC ((struct sk_buff *)1UL) static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q) { const struct netdev_queue *txq = q->dev_queue; spinlock_t *lock = NULL; struct sk_buff *skb; if (q->flags & TCQ_F_NOLOCK) { lock = qdisc_lock(q); spin_lock(lock); } skb = skb_peek(&q->skb_bad_txq); if (skb) { /* check the reason of requeuing without tx lock first */ txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { skb = __skb_dequeue(&q->skb_bad_txq); if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_dec(q, skb); qdisc_qstats_cpu_qlen_dec(q); } else { qdisc_qstats_backlog_dec(q, skb); q->q.qlen--; } } else { skb = SKB_XOFF_MAGIC; qdisc_maybe_clear_missed(q, txq); } } if (lock) spin_unlock(lock); return skb; } static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q) { struct sk_buff *skb = skb_peek(&q->skb_bad_txq); if (unlikely(skb)) skb = __skb_dequeue_bad_txq(q); return skb; } static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q, struct sk_buff *skb) { spinlock_t *lock = NULL; if (q->flags & TCQ_F_NOLOCK) { lock = qdisc_lock(q); spin_lock(lock); } __skb_queue_tail(&q->skb_bad_txq, skb); if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_inc(q, skb); qdisc_qstats_cpu_qlen_inc(q); } else { qdisc_qstats_backlog_inc(q, skb); q->q.qlen++; } if (lock) spin_unlock(lock); } static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) { spinlock_t *lock = NULL; if (q->flags & TCQ_F_NOLOCK) { lock = qdisc_lock(q); spin_lock(lock); } while (skb) { struct sk_buff *next = skb->next; __skb_queue_tail(&q->gso_skb, skb); /* it's still part of the queue */ if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_requeues_inc(q); qdisc_qstats_cpu_backlog_inc(q, skb); qdisc_qstats_cpu_qlen_inc(q); } else { q->qstats.requeues++; qdisc_qstats_backlog_inc(q, skb); q->q.qlen++; } skb = next; } if (lock) { spin_unlock(lock); set_bit(__QDISC_STATE_MISSED, &q->state); } else { __netif_schedule(q); } } static void try_bulk_dequeue_skb(struct Qdisc *q, struct sk_buff *skb, const struct netdev_queue *txq, int *packets) { int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; while (bytelimit > 0) { struct sk_buff *nskb = q->dequeue(q); if (!nskb) break; bytelimit -= nskb->len; /* covers GSO len */ skb->next = nskb; skb = nskb; (*packets)++; /* GSO counts as one pkt */ } skb_mark_not_on_list(skb); } /* This variant of try_bulk_dequeue_skb() makes sure * all skbs in the chain are for the same txq */ static void try_bulk_dequeue_skb_slow(struct Qdisc *q, struct sk_buff *skb, int *packets) { int mapping = skb_get_queue_mapping(skb); struct sk_buff *nskb; int cnt = 0; do { nskb = q->dequeue(q); if (!nskb) break; if (unlikely(skb_get_queue_mapping(nskb) != mapping)) { qdisc_enqueue_skb_bad_txq(q, nskb); break; } skb->next = nskb; skb = nskb; } while (++cnt < 8); (*packets) += cnt; skb_mark_not_on_list(skb); } /* Note that dequeue_skb can possibly return a SKB list (via skb->next). * A requeued skb (via q->gso_skb) can also be a SKB list. */ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, int *packets) { const struct netdev_queue *txq = q->dev_queue; struct sk_buff *skb = NULL; *packets = 1; if (unlikely(!skb_queue_empty(&q->gso_skb))) { spinlock_t *lock = NULL; if (q->flags & TCQ_F_NOLOCK) { lock = qdisc_lock(q); spin_lock(lock); } skb = skb_peek(&q->gso_skb); /* skb may be null if another cpu pulls gso_skb off in between * empty check and lock. 
*/ if (!skb) { if (lock) spin_unlock(lock); goto validate; } /* skb in gso_skb were already validated */ *validate = false; if (xfrm_offload(skb)) *validate = true; /* check the reason of requeuing without tx lock first */ txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { skb = __skb_dequeue(&q->gso_skb); if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_dec(q, skb); qdisc_qstats_cpu_qlen_dec(q); } else { qdisc_qstats_backlog_dec(q, skb); q->q.qlen--; } } else { skb = NULL; qdisc_maybe_clear_missed(q, txq); } if (lock) spin_unlock(lock); goto trace; } validate: *validate = true; if ((q->flags & TCQ_F_ONETXQUEUE) && netif_xmit_frozen_or_stopped(txq)) { qdisc_maybe_clear_missed(q, txq); return skb; } skb = qdisc_dequeue_skb_bad_txq(q); if (unlikely(skb)) { if (skb == SKB_XOFF_MAGIC) return NULL; goto bulk; } skb = q->dequeue(q); if (skb) { bulk: if (qdisc_may_bulk(q)) try_bulk_dequeue_skb(q, skb, txq, packets); else try_bulk_dequeue_skb_slow(q, skb, packets); } trace: trace_qdisc_dequeue(q, txq, *packets, skb); return skb; } /* * Transmit possibly several skbs, and handle the return status as * required. Owning qdisc running bit guarantees that only one CPU * can execute this function. * * Returns to the caller: * false - hardware queue frozen backoff * true - feel free to send more pkts */ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq, spinlock_t *root_lock, bool validate) { int ret = NETDEV_TX_BUSY; bool again = false; /* And release qdisc */ if (root_lock) spin_unlock(root_lock); /* Note that we validate skb (GSO, checksum, ...) outside of locks */ if (validate) skb = validate_xmit_skb_list(skb, dev, &again); #ifdef CONFIG_XFRM_OFFLOAD if (unlikely(again)) { if (root_lock) spin_lock(root_lock); dev_requeue_skb(skb, q); return false; } #endif if (likely(skb)) { HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_stopped(txq)) skb = dev_hard_start_xmit(skb, dev, txq, &ret); else qdisc_maybe_clear_missed(q, txq); HARD_TX_UNLOCK(dev, txq); } else { if (root_lock) spin_lock(root_lock); return true; } if (root_lock) spin_lock(root_lock); if (!dev_xmit_complete(ret)) { /* Driver returned NETDEV_TX_BUSY - requeue skb */ if (unlikely(ret != NETDEV_TX_BUSY)) net_warn_ratelimited("BUG %s code %d qlen %d\n", dev->name, ret, q->q.qlen); dev_requeue_skb(skb, q); return false; } return true; } /* * NOTE: Called under qdisc_lock(q) with locally disabled BH. * * running seqcount guarantees only one CPU can process * this qdisc at a time. qdisc_lock(q) serializes queue accesses for * this queue. * * netif_tx_lock serializes accesses to device driver. * * qdisc_lock(q) and netif_tx_lock are mutually exclusive, * if one is grabbed, another must be free. * * Note that this procedure can be called by a watchdog timer * * Returns to the caller: * false - queue is empty or throttled. * true - queue is not empty. 
* */ static inline bool qdisc_restart(struct Qdisc *q, int *packets) { spinlock_t *root_lock = NULL; struct netdev_queue *txq; struct net_device *dev; struct sk_buff *skb; bool validate; /* Dequeue packet */ skb = dequeue_skb(q, &validate, packets); if (unlikely(!skb)) return false; if (!(q->flags & TCQ_F_NOLOCK)) root_lock = qdisc_lock(q); dev = qdisc_dev(q); txq = skb_get_tx_queue(dev, skb); return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); } void __qdisc_run(struct Qdisc *q) { int quota = READ_ONCE(net_hotdata.dev_tx_weight); int packets; while (qdisc_restart(q, &packets)) { quota -= packets; if (quota <= 0) { if (q->flags & TCQ_F_NOLOCK) set_bit(__QDISC_STATE_MISSED, &q->state); else __netif_schedule(q); break; } } } unsigned long dev_trans_start(struct net_device *dev) { unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start); unsigned long val; unsigned int i; for (i = 1; i < dev->num_tx_queues; i++) { val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start); if (val && time_after(val, res)) res = val; } return res; } EXPORT_SYMBOL(dev_trans_start); static void netif_freeze_queues(struct net_device *dev) { unsigned int i; int cpu; cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); /* We are the only thread of execution doing a * freeze, but we have to grab the _xmit_lock in * order to synchronize with threads which are in * the ->hard_start_xmit() handler and already * checked the frozen bit. */ __netif_tx_lock(txq, cpu); set_bit(__QUEUE_STATE_FROZEN, &txq->state); __netif_tx_unlock(txq); } } void netif_tx_lock(struct net_device *dev) { spin_lock(&dev->tx_global_lock); netif_freeze_queues(dev); } EXPORT_SYMBOL(netif_tx_lock); static void netif_unfreeze_queues(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); /* No need to grab the _xmit_lock here. If the * queue is not stopped for another reason, we * force a schedule. */ clear_bit(__QUEUE_STATE_FROZEN, &txq->state); netif_schedule_queue(txq); } } void netif_tx_unlock(struct net_device *dev) { netif_unfreeze_queues(dev); spin_unlock(&dev->tx_global_lock); } EXPORT_SYMBOL(netif_tx_unlock); static void dev_watchdog(struct timer_list *t) { struct net_device *dev = timer_container_of(dev, t, watchdog_timer); bool release = true; spin_lock(&dev->tx_global_lock); if (!qdisc_tx_is_noop(dev)) { if (netif_device_present(dev) && netif_running(dev) && netif_carrier_ok(dev)) { unsigned int timedout_ms = 0; unsigned int i; unsigned long trans_start; unsigned long oldest_start = jiffies; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq; txq = netdev_get_tx_queue(dev, i); if (!netif_xmit_stopped(txq)) continue; /* Paired with WRITE_ONCE() + smp_mb...() in * netdev_tx_sent_queue() and netif_tx_stop_queue(). 
*/ smp_mb(); trans_start = READ_ONCE(txq->trans_start); if (time_after(jiffies, trans_start + dev->watchdog_timeo)) { timedout_ms = jiffies_to_msecs(jiffies - trans_start); atomic_long_inc(&txq->trans_timeout); break; } if (time_after(oldest_start, trans_start)) oldest_start = trans_start; } if (unlikely(timedout_ms)) { trace_net_dev_xmit_timeout(dev, i); netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n", raw_smp_processor_id(), i, timedout_ms); netif_freeze_queues(dev); dev->netdev_ops->ndo_tx_timeout(dev, i); netif_unfreeze_queues(dev); } if (!mod_timer(&dev->watchdog_timer, round_jiffies(oldest_start + dev->watchdog_timeo))) release = false; } } spin_unlock(&dev->tx_global_lock); if (release) netdev_put(dev, &dev->watchdog_dev_tracker); } void netdev_watchdog_up(struct net_device *dev) { if (!dev->netdev_ops->ndo_tx_timeout) return; if (dev->watchdog_timeo <= 0) dev->watchdog_timeo = 5*HZ; if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) netdev_hold(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(netdev_watchdog_up); static void netdev_watchdog_down(struct net_device *dev) { netif_tx_lock_bh(dev); if (timer_delete(&dev->watchdog_timer)) netdev_put(dev, &dev->watchdog_dev_tracker); netif_tx_unlock_bh(dev); } /** * netif_carrier_on - set carrier * @dev: network device * * Device has detected acquisition of carrier. */ void netif_carrier_on(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { if (dev->reg_state == NETREG_UNINITIALIZED) return; atomic_inc(&dev->carrier_up_count); linkwatch_fire_event(dev); if (netif_running(dev)) netdev_watchdog_up(dev); } } EXPORT_SYMBOL(netif_carrier_on); /** * netif_carrier_off - clear carrier * @dev: network device * * Device has detected loss of carrier. */ void netif_carrier_off(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) { if (dev->reg_state == NETREG_UNINITIALIZED) return; atomic_inc(&dev->carrier_down_count); linkwatch_fire_event(dev); } } EXPORT_SYMBOL(netif_carrier_off); /** * netif_carrier_event - report carrier state event * @dev: network device * * Device has detected a carrier event but the carrier state wasn't changed. * Use in drivers when querying carrier state asynchronously, to avoid missing * events (link flaps) if link recovers before it's queried. */ void netif_carrier_event(struct net_device *dev) { if (dev->reg_state == NETREG_UNINITIALIZED) return; atomic_inc(&dev->carrier_up_count); atomic_inc(&dev->carrier_down_count); linkwatch_fire_event(dev); } EXPORT_SYMBOL_GPL(netif_carrier_event); /* "NOOP" scheduler: the best scheduler, recommended for all interfaces under all circumstances. It is difficult to invent anything faster or cheaper. 
*/ static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { dev_core_stats_tx_dropped_inc(skb->dev); __qdisc_drop(skb, to_free); return NET_XMIT_CN; } static struct sk_buff *noop_dequeue(struct Qdisc *qdisc) { return NULL; } struct Qdisc_ops noop_qdisc_ops __read_mostly = { .id = "noop", .priv_size = 0, .enqueue = noop_enqueue, .dequeue = noop_dequeue, .peek = noop_dequeue, .owner = THIS_MODULE, }; static struct netdev_queue noop_netdev_queue = { RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc), RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc), }; struct Qdisc noop_qdisc = { .enqueue = noop_enqueue, .dequeue = noop_dequeue, .flags = TCQ_F_BUILTIN, .ops = &noop_qdisc_ops, .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), .dev_queue = &noop_netdev_queue, .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), .gso_skb = { .next = (struct sk_buff *)&noop_qdisc.gso_skb, .prev = (struct sk_buff *)&noop_qdisc.gso_skb, .qlen = 0, .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock), }, .skb_bad_txq = { .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq, .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq, .qlen = 0, .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock), }, .owner = -1, }; EXPORT_SYMBOL(noop_qdisc); static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt, struct netlink_ext_ack *extack) { /* register_qdisc() assigns a default of noop_enqueue if unset, * but __dev_queue_xmit() treats noqueue only as such * if this is NULL - so clear it here. */ qdisc->enqueue = NULL; return 0; } struct Qdisc_ops noqueue_qdisc_ops __read_mostly = { .id = "noqueue", .priv_size = 0, .init = noqueue_init, .enqueue = noop_enqueue, .dequeue = noop_dequeue, .peek = noop_dequeue, .owner = THIS_MODULE, }; const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 }; EXPORT_SYMBOL(sch_default_prio2band); /* 3-band FIFO queue: old style, but should be a bit faster than generic prio+fifo combination. 
*/ #define PFIFO_FAST_BANDS 3 /* * Private data for a pfifo_fast scheduler containing: * - rings for priority bands */ struct pfifo_fast_priv { struct skb_array q[PFIFO_FAST_BANDS]; }; static inline struct skb_array *band2list(struct pfifo_fast_priv *priv, int band) { return &priv->q[band]; } static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX]; struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct skb_array *q = band2list(priv, band); unsigned int pkt_len = qdisc_pkt_len(skb); int err; err = skb_array_produce(q, skb); if (unlikely(err)) { tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT); if (qdisc_is_percpu_stats(qdisc)) return qdisc_drop_cpu(skb, qdisc, to_free); else return qdisc_drop(skb, qdisc, to_free); } qdisc_update_stats_at_enqueue(qdisc, pkt_len); return NET_XMIT_SUCCESS; } static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) { struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct sk_buff *skb = NULL; bool need_retry = true; int band; retry: for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { struct skb_array *q = band2list(priv, band); if (__skb_array_empty(q)) continue; skb = __skb_array_consume(q); } if (likely(skb)) { qdisc_update_stats_at_dequeue(qdisc, skb); } else if (need_retry && READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) { /* Delay clearing the STATE_MISSED here to reduce * the overhead of the second spin_trylock() in * qdisc_run_begin() and __netif_schedule() calling * in qdisc_run_end(). */ clear_bit(__QDISC_STATE_MISSED, &qdisc->state); clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); /* Make sure dequeuing happens after clearing * STATE_MISSED. */ smp_mb__after_atomic(); need_retry = false; goto retry; } return skb; } static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc) { struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct sk_buff *skb = NULL; int band; for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { struct skb_array *q = band2list(priv, band); skb = __skb_array_peek(q); } return skb; } static void pfifo_fast_reset(struct Qdisc *qdisc) { int i, band; struct pfifo_fast_priv *priv = qdisc_priv(qdisc); for (band = 0; band < PFIFO_FAST_BANDS; band++) { struct skb_array *q = band2list(priv, band); struct sk_buff *skb; /* NULL ring is possible if destroy path is due to a failed * skb_array_init() in pfifo_fast_init() case. 
*/ if (!q->ring.queue) continue; while ((skb = __skb_array_consume(q)) != NULL) kfree_skb(skb); } if (qdisc_is_percpu_stats(qdisc)) { for_each_possible_cpu(i) { struct gnet_stats_queue *q; q = per_cpu_ptr(qdisc->cpu_qstats, i); q->backlog = 0; q->qlen = 0; } } } static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) { struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1); if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) goto nla_put_failure; return skb->len; nla_put_failure: return -1; } static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt, struct netlink_ext_ack *extack) { unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len; struct pfifo_fast_priv *priv = qdisc_priv(qdisc); int prio; /* guard against zero length rings */ if (!qlen) return -EINVAL; for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { struct skb_array *q = band2list(priv, prio); int err; err = skb_array_init(q, qlen, GFP_KERNEL); if (err) return -ENOMEM; } /* Can by-pass the queue discipline */ qdisc->flags |= TCQ_F_CAN_BYPASS; return 0; } static void pfifo_fast_destroy(struct Qdisc *sch) { struct pfifo_fast_priv *priv = qdisc_priv(sch); int prio; for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { struct skb_array *q = band2list(priv, prio); /* NULL ring is possible if destroy path is due to a failed * skb_array_init() in pfifo_fast_init() case. */ if (!q->ring.queue) continue; /* Destroy ring but no need to kfree_skb because a call to * pfifo_fast_reset() has already done that work. */ ptr_ring_cleanup(&q->ring, NULL); } } static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch, unsigned int new_len) { struct pfifo_fast_priv *priv = qdisc_priv(sch); struct skb_array *bands[PFIFO_FAST_BANDS]; int prio; for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { struct skb_array *q = band2list(priv, prio); bands[prio] = q; } return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len, GFP_KERNEL); } struct Qdisc_ops pfifo_fast_ops __read_mostly = { .id = "pfifo_fast", .priv_size = sizeof(struct pfifo_fast_priv), .enqueue = pfifo_fast_enqueue, .dequeue = pfifo_fast_dequeue, .peek = pfifo_fast_peek, .init = pfifo_fast_init, .destroy = pfifo_fast_destroy, .reset = pfifo_fast_reset, .dump = pfifo_fast_dump, .change_tx_queue_len = pfifo_fast_change_tx_queue_len, .owner = THIS_MODULE, .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS, }; EXPORT_SYMBOL(pfifo_fast_ops); static struct lock_class_key qdisc_tx_busylock; struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, struct netlink_ext_ack *extack) { struct Qdisc *sch; unsigned int size = sizeof(*sch) + ops->priv_size; int err = -ENOBUFS; struct net_device *dev; if (!dev_queue) { NL_SET_ERR_MSG(extack, "No device queue given"); err = -EINVAL; goto errout; } dev = dev_queue->dev; sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue)); if (!sch) goto errout; __skb_queue_head_init(&sch->gso_skb); __skb_queue_head_init(&sch->skb_bad_txq); gnet_stats_basic_sync_init(&sch->bstats); lockdep_register_key(&sch->root_lock_key); spin_lock_init(&sch->q.lock); lockdep_set_class(&sch->q.lock, &sch->root_lock_key); if (ops->static_flags & TCQ_F_CPUSTATS) { sch->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync); if (!sch->cpu_bstats) goto errout1; sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); if (!sch->cpu_qstats) { free_percpu(sch->cpu_bstats); goto errout1; } } spin_lock_init(&sch->busylock); lockdep_set_class(&sch->busylock, 
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); /* seqlock has the same scope of busylock, for NOLOCK qdisc */ spin_lock_init(&sch->seqlock); lockdep_set_class(&sch->seqlock, dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); sch->ops = ops; sch->flags = ops->static_flags; sch->enqueue = ops->enqueue; sch->dequeue = ops->dequeue; sch->dev_queue = dev_queue; sch->owner = -1; netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL); refcount_set(&sch->refcnt, 1); return sch; errout1: lockdep_unregister_key(&sch->root_lock_key); kfree(sch); errout: return ERR_PTR(err); } struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, unsigned int parentid, struct netlink_ext_ack *extack) { struct Qdisc *sch; if (!bpf_try_module_get(ops, ops->owner)) { NL_SET_ERR_MSG(extack, "Failed to increase module reference counter"); return NULL; } sch = qdisc_alloc(dev_queue, ops, extack); if (IS_ERR(sch)) { bpf_module_put(ops, ops->owner); return NULL; } sch->parent = parentid; if (!ops->init || ops->init(sch, NULL, extack) == 0) { trace_qdisc_create(ops, dev_queue->dev, parentid); return sch; } qdisc_put(sch); return NULL; } EXPORT_SYMBOL(qdisc_create_dflt); /* Under qdisc_lock(qdisc) and BH! */ void qdisc_reset(struct Qdisc *qdisc) { const struct Qdisc_ops *ops = qdisc->ops; trace_qdisc_reset(qdisc); if (ops->reset) ops->reset(qdisc); __skb_queue_purge(&qdisc->gso_skb); __skb_queue_purge(&qdisc->skb_bad_txq); qdisc->q.qlen = 0; qdisc->qstats.backlog = 0; } EXPORT_SYMBOL(qdisc_reset); void qdisc_free(struct Qdisc *qdisc) { if (qdisc_is_percpu_stats(qdisc)) { free_percpu(qdisc->cpu_bstats); free_percpu(qdisc->cpu_qstats); } kfree(qdisc); } static void qdisc_free_cb(struct rcu_head *head) { struct Qdisc *q = container_of(head, struct Qdisc, rcu); qdisc_free(q); } static void __qdisc_destroy(struct Qdisc *qdisc) { const struct Qdisc_ops *ops = qdisc->ops; struct net_device *dev = qdisc_dev(qdisc); #ifdef CONFIG_NET_SCHED qdisc_hash_del(qdisc); qdisc_put_stab(rtnl_dereference(qdisc->stab)); #endif gen_kill_estimator(&qdisc->rate_est); qdisc_reset(qdisc); if (ops->destroy) ops->destroy(qdisc); lockdep_unregister_key(&qdisc->root_lock_key); bpf_module_put(ops, ops->owner); netdev_put(dev, &qdisc->dev_tracker); trace_qdisc_destroy(qdisc); call_rcu(&qdisc->rcu, qdisc_free_cb); } void qdisc_destroy(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_BUILTIN) return; __qdisc_destroy(qdisc); } void qdisc_put(struct Qdisc *qdisc) { if (!qdisc) return; if (qdisc->flags & TCQ_F_BUILTIN || !refcount_dec_and_test(&qdisc->refcnt)) return; __qdisc_destroy(qdisc); } EXPORT_SYMBOL(qdisc_put); /* Version of qdisc_put() that is called with rtnl mutex unlocked. * Intended to be used as optimization, this function only takes rtnl lock if * qdisc reference counter reached zero. */ void qdisc_put_unlocked(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_BUILTIN || !refcount_dec_and_rtnl_lock(&qdisc->refcnt)) return; __qdisc_destroy(qdisc); rtnl_unlock(); } EXPORT_SYMBOL(qdisc_put_unlocked); /* Attach toplevel qdisc to device queue. */ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *qdisc) { struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping); spinlock_t *root_lock; root_lock = qdisc_lock(oqdisc); spin_lock_bh(root_lock); /* ... 
and graft new one */ if (qdisc == NULL) qdisc = &noop_qdisc; rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); spin_unlock_bh(root_lock); return oqdisc; } EXPORT_SYMBOL(dev_graft_qdisc); static void shutdown_scheduler_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_qdisc_default) { struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); struct Qdisc *qdisc_default = _qdisc_default; if (qdisc) { rcu_assign_pointer(dev_queue->qdisc, qdisc_default); rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default); qdisc_put(qdisc); } } static void attach_one_default_qdisc(struct net_device *dev, struct netdev_queue *dev_queue, void *_unused) { struct Qdisc *qdisc; const struct Qdisc_ops *ops = default_qdisc_ops; if (dev->priv_flags & IFF_NO_QUEUE) ops = &noqueue_qdisc_ops; else if(dev->type == ARPHRD_CAN) ops = &pfifo_fast_ops; qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL); if (!qdisc) return; if (!netif_is_multiqueue(dev)) qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); } static void attach_default_qdiscs(struct net_device *dev) { struct netdev_queue *txq; struct Qdisc *qdisc; txq = netdev_get_tx_queue(dev, 0); if (!netif_is_multiqueue(dev) || dev->priv_flags & IFF_NO_QUEUE) { netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); qdisc = rtnl_dereference(txq->qdisc_sleeping); rcu_assign_pointer(dev->qdisc, qdisc); qdisc_refcount_inc(qdisc); } else { qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); if (qdisc) { rcu_assign_pointer(dev->qdisc, qdisc); qdisc->ops->attach(qdisc); } } qdisc = rtnl_dereference(dev->qdisc); /* Detect default qdisc setup/init failed and fallback to "noqueue" */ if (qdisc == &noop_qdisc) { netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", default_qdisc_ops->id, noqueue_qdisc_ops.id); netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); dev->priv_flags |= IFF_NO_QUEUE; netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); qdisc = rtnl_dereference(txq->qdisc_sleeping); rcu_assign_pointer(dev->qdisc, qdisc); qdisc_refcount_inc(qdisc); dev->priv_flags ^= IFF_NO_QUEUE; } #ifdef CONFIG_NET_SCHED if (qdisc != &noop_qdisc) qdisc_hash_add(qdisc, false); #endif } static void transition_one_qdisc(struct net_device *dev, struct netdev_queue *dev_queue, void *_need_watchdog) { struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); int *need_watchdog_p = _need_watchdog; if (!(new_qdisc->flags & TCQ_F_BUILTIN)) clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); rcu_assign_pointer(dev_queue->qdisc, new_qdisc); if (need_watchdog_p) { WRITE_ONCE(dev_queue->trans_start, 0); *need_watchdog_p = 1; } } void dev_activate(struct net_device *dev) { int need_watchdog; /* No queueing discipline is attached to device; * create default one for devices, which need queueing * and noqueue_qdisc for virtual interfaces */ if (rtnl_dereference(dev->qdisc) == &noop_qdisc) attach_default_qdiscs(dev); if (!netif_carrier_ok(dev)) /* Delay activation until next carrier-on event */ return; need_watchdog = 0; netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog); if (dev_ingress_queue(dev)) transition_one_qdisc(dev, dev_ingress_queue(dev), NULL); if (need_watchdog) { netif_trans_update(dev); netdev_watchdog_up(dev); } } EXPORT_SYMBOL(dev_activate); static void qdisc_deactivate(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_BUILTIN) return; 
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); } static void dev_deactivate_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_sync_needed) { bool *sync_needed = _sync_needed; struct Qdisc *qdisc; qdisc = rtnl_dereference(dev_queue->qdisc); if (qdisc) { if (qdisc->enqueue) *sync_needed = true; qdisc_deactivate(qdisc); rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); } } static void dev_reset_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_unused) { struct Qdisc *qdisc; bool nolock; qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); if (!qdisc) return; nolock = qdisc->flags & TCQ_F_NOLOCK; if (nolock) spin_lock_bh(&qdisc->seqlock); spin_lock_bh(qdisc_lock(qdisc)); qdisc_reset(qdisc); spin_unlock_bh(qdisc_lock(qdisc)); if (nolock) { clear_bit(__QDISC_STATE_MISSED, &qdisc->state); clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); spin_unlock_bh(&qdisc->seqlock); } } static bool some_qdisc_is_busy(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *dev_queue; spinlock_t *root_lock; struct Qdisc *q; int val; dev_queue = netdev_get_tx_queue(dev, i); q = rtnl_dereference(dev_queue->qdisc_sleeping); root_lock = qdisc_lock(q); spin_lock_bh(root_lock); val = (qdisc_is_running(q) || test_bit(__QDISC_STATE_SCHED, &q->state)); spin_unlock_bh(root_lock); if (val) return true; } return false; } /** * dev_deactivate_many - deactivate transmissions on several devices * @head: list of devices to deactivate * * This function returns only when all outstanding transmissions * have completed, unless all devices are in dismantle phase. */ void dev_deactivate_many(struct list_head *head) { bool sync_needed = false; struct net_device *dev; list_for_each_entry(dev, head, close_list) { netdev_for_each_tx_queue(dev, dev_deactivate_queue, &sync_needed); if (dev_ingress_queue(dev)) dev_deactivate_queue(dev, dev_ingress_queue(dev), &sync_needed); netdev_watchdog_down(dev); } /* Wait for outstanding qdisc enqueuing calls. */ if (sync_needed) synchronize_net(); list_for_each_entry(dev, head, close_list) { netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); if (dev_ingress_queue(dev)) dev_reset_queue(dev, dev_ingress_queue(dev), NULL); } /* Wait for outstanding qdisc_run calls. */ list_for_each_entry(dev, head, close_list) { while (some_qdisc_is_busy(dev)) { /* wait_event() would avoid this sleep-loop but would * require expensive checks in the fast paths of packet * processing which isn't worth it. 
*/ schedule_timeout_uninterruptible(1); } } } void dev_deactivate(struct net_device *dev) { LIST_HEAD(single); list_add(&dev->close_list, &single); dev_deactivate_many(&single); list_del(&single); } EXPORT_SYMBOL(dev_deactivate); static int qdisc_change_tx_queue_len(struct net_device *dev, struct netdev_queue *dev_queue) { struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); const struct Qdisc_ops *ops = qdisc->ops; if (ops->change_tx_queue_len) return ops->change_tx_queue_len(qdisc, dev->tx_queue_len); return 0; } void dev_qdisc_change_real_num_tx(struct net_device *dev, unsigned int new_real_tx) { struct Qdisc *qdisc = rtnl_dereference(dev->qdisc); if (qdisc->ops->change_real_num_tx) qdisc->ops->change_real_num_tx(qdisc, new_real_tx); } void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx) { #ifdef CONFIG_NET_SCHED struct net_device *dev = qdisc_dev(sch); struct Qdisc *qdisc; unsigned int i; for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); /* Only update the default qdiscs we created, * qdiscs with handles are always hashed. */ if (qdisc != &noop_qdisc && !qdisc->handle) qdisc_hash_del(qdisc); } for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); if (qdisc != &noop_qdisc && !qdisc->handle) qdisc_hash_add(qdisc, false); } #endif } EXPORT_SYMBOL(mq_change_real_num_tx); int dev_qdisc_change_tx_queue_len(struct net_device *dev) { bool up = dev->flags & IFF_UP; unsigned int i; int ret = 0; if (up) dev_deactivate(dev); for (i = 0; i < dev->num_tx_queues; i++) { ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]); /* TODO: revert changes on a partial failure */ if (ret) break; } if (up) dev_activate(dev); return ret; } static void dev_init_scheduler_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_qdisc) { struct Qdisc *qdisc = _qdisc; rcu_assign_pointer(dev_queue->qdisc, qdisc); rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); } void dev_init_scheduler(struct net_device *dev) { rcu_assign_pointer(dev->qdisc, &noop_qdisc); netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); timer_setup(&dev->watchdog_timer, dev_watchdog, 0); } void dev_shutdown(struct net_device *dev) { netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); qdisc_put(rtnl_dereference(dev->qdisc)); rcu_assign_pointer(dev->qdisc, &noop_qdisc); WARN_ON(timer_pending(&dev->watchdog_timer)); } /** * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division * @rate: Rate to compute reciprocal division values of * @mult: Multiplier for reciprocal division * @shift: Shift for reciprocal division * * The multiplier and shift for reciprocal division by rate are stored * in mult and shift. * * The deal here is to replace a divide by a reciprocal one * in fast path (a reciprocal divide is a multiply and a shift) * * Normal formula would be : * time_in_ns = (NSEC_PER_SEC * len) / rate_bps * * We compute mult/shift to use instead : * time_in_ns = (len * mult) >> shift; * * We try to get the highest possible mult value for accuracy, * but have to make sure no overflows will ever happen. * * reciprocal_value() is not used here it doesn't handle 64-bit values. 
*/ static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift) { u64 factor = NSEC_PER_SEC; *mult = 1; *shift = 0; if (rate <= 0) return; for (;;) { *mult = div64_u64(factor, rate); if (*mult & (1U << 31) || factor & (1ULL << 63)) break; factor <<= 1; (*shift)++; } } void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf, u64 rate64) { memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; r->mpu = conf->mpu; r->rate_bytes_ps = max_t(u64, conf->rate, rate64); r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift); } EXPORT_SYMBOL(psched_ratecfg_precompute); void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64) { r->rate_pkts_ps = pktrate64; psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift); } EXPORT_SYMBOL(psched_ppscfg_precompute); void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, struct tcf_proto *tp_head) { /* Protected with chain0->filter_chain_lock. * Can't access chain directly because tp_head can be NULL. */ struct mini_Qdisc *miniq_old = rcu_dereference_protected(*miniqp->p_miniq, 1); struct mini_Qdisc *miniq; if (!tp_head) { RCU_INIT_POINTER(*miniqp->p_miniq, NULL); } else { miniq = miniq_old != &miniqp->miniq1 ? &miniqp->miniq1 : &miniqp->miniq2; /* We need to make sure that readers won't see the miniq * we are about to modify. So ensure that at least one RCU * grace period has elapsed since the miniq was made * inactive. */ if (IS_ENABLED(CONFIG_PREEMPT_RT)) cond_synchronize_rcu(miniq->rcu_state); else if (!poll_state_synchronize_rcu(miniq->rcu_state)) synchronize_rcu_expedited(); miniq->filter_list = tp_head; rcu_assign_pointer(*miniqp->p_miniq, miniq); } if (miniq_old) /* This is the counterpart of the RCU sync above. We need to * block potential new users of miniq_old until no readers * can still see it. */ miniq_old->rcu_state = start_poll_synchronize_rcu(); } EXPORT_SYMBOL(mini_qdisc_pair_swap); void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, struct tcf_block *block) { miniqp->miniq1.block = block; miniqp->miniq2.block = block; } EXPORT_SYMBOL(mini_qdisc_pair_block_init); void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, struct mini_Qdisc __rcu **p_miniq) { miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats; miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats; miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats; miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats; miniqp->miniq1.rcu_state = get_state_synchronize_rcu(); miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state; miniqp->p_miniq = p_miniq; } EXPORT_SYMBOL(mini_qdisc_pair_init);
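To make the mult/shift trick described above concrete, here is a minimal standalone userspace sketch (not part of the kernel source; the helper name precompute and the sample rate are illustrative only) that mirrors the loop in psched_ratecfg_precompute__() and checks the approximation against the exact division:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* mirror of the kernel loop: grow factor until mult saturates 31 bits */
static void precompute(uint64_t rate, uint32_t *mult, uint8_t *shift)
{
	uint64_t factor = NSEC_PER_SEC;

	*mult = 1;
	*shift = 0;
	if (!rate)
		return;
	for (;;) {
		*mult = (uint32_t)(factor / rate);
		if ((*mult & (1U << 31)) || (factor & (1ULL << 63)))
			break;
		factor <<= 1;
		(*shift)++;
	}
}

int main(void)
{
	uint64_t rate = 125000000;	/* 1 Gbit/s expressed in bytes/s */
	uint64_t len = 1500;		/* one full-size Ethernet frame */
	uint32_t mult;
	uint8_t shift;

	precompute(rate, &mult, &shift);
	printf("exact:  %llu ns\n", (unsigned long long)(NSEC_PER_SEC * len / rate));
	printf("approx: %llu ns\n", (unsigned long long)((len * mult) >> shift));
	return 0;
}

Both lines print 12000 ns for this rate; the fast path pays one multiply and one shift instead of a 64-bit divide.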
// SPDX-License-Identifier: GPL-2.0 /* Driver for Theobroma Systems UCAN devices, Protocol Version 3 * * Copyright (C) 2018 Theobroma Systems Design und
Consulting GmbH * * * General Description: * * The USB Device uses three Endpoints: * * CONTROL Endpoint: Is used to set up the device (start, stop, * info, configure). * * IN Endpoint: The device sends CAN Frame Messages and Device * Information using the IN endpoint. * * OUT Endpoint: The driver sends configuration requests, and CAN * Frames on the OUT endpoint. * * Error Handling: * * If error reporting is turned on, the device encodes errors into CAN * error frames (see uapi/linux/can/error.h) and sends them using the * IN Endpoint. The driver updates statistics and forwards them. */ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/signal.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/usb.h> #define UCAN_DRIVER_NAME "ucan" #define UCAN_MAX_RX_URBS 8 /* the CAN controller needs a while to enable/disable the bus */ #define UCAN_USB_CTL_PIPE_TIMEOUT 1000 /* this driver currently supports protocol version 3 only */ #define UCAN_PROTOCOL_VERSION_MIN 3 #define UCAN_PROTOCOL_VERSION_MAX 3 /* UCAN Message Definitions * ------------------------ * * ucan_message_out_t and ucan_message_in_t define the messages * transmitted on the OUT and IN endpoint. * * Multibyte fields are transmitted in little-endian byte order * * INTR Endpoint: a single uint32_t storing the current space in the fifo * * OUT Endpoint: single message of type ucan_message_out_t is * transmitted on the out endpoint * * IN Endpoint: multiple messages ucan_message_in_t concatenated in * the following way: * * m[n].len <=> the length of message n (including the header) in bytes * m[n] is aligned to a 4 byte boundary, hence * offset(m[0]) := 0; * offset(m[n+1]) := (offset(m[n]) + m[n].len + 3) & ~3 * * this implies that * offset(m[n]) % 4 <=> 0 */ /* Device Global Commands */ enum { UCAN_DEVICE_GET_FW_STRING = 0, }; /* UCAN Commands */ enum { /* start the can transceiver - val defines the operation mode */ UCAN_COMMAND_START = 0, /* cancel pending transmissions and stop the can transceiver */ UCAN_COMMAND_STOP = 1, /* send can transceiver into low-power sleep mode */ UCAN_COMMAND_SLEEP = 2, /* wake up can transceiver from low-power sleep mode */ UCAN_COMMAND_WAKEUP = 3, /* reset the can transceiver */ UCAN_COMMAND_RESET = 4, /* get piece of info from the can transceiver - subcmd defines what * piece */ UCAN_COMMAND_GET = 5, /* clear or disable hardware filter - subcmd defines which of the two */ UCAN_COMMAND_FILTER = 6, /* Setup bittiming */ UCAN_COMMAND_SET_BITTIMING = 7, /* recover from bus-off state */ UCAN_COMMAND_RESTART = 8, }; /* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap). * Undefined bits must be set to 0.
*/ enum { UCAN_MODE_LOOPBACK = BIT(0), UCAN_MODE_SILENT = BIT(1), UCAN_MODE_3_SAMPLES = BIT(2), UCAN_MODE_ONE_SHOT = BIT(3), UCAN_MODE_BERR_REPORT = BIT(4), }; /* UCAN_COMMAND_GET subcommands */ enum { UCAN_COMMAND_GET_INFO = 0, UCAN_COMMAND_GET_PROTOCOL_VERSION = 1, }; /* UCAN_COMMAND_FILTER subcommands */ enum { UCAN_FILTER_CLEAR = 0, UCAN_FILTER_DISABLE = 1, UCAN_FILTER_ENABLE = 2, }; /* OUT endpoint message types */ enum { UCAN_OUT_TX = 2, /* transmit a CAN frame */ }; /* IN endpoint message types */ enum { UCAN_IN_TX_COMPLETE = 1, /* CAN frame transmission completed */ UCAN_IN_RX = 2, /* CAN frame received */ }; struct ucan_ctl_cmd_start { __le16 mode; /* OR-ing any of UCAN_MODE_* */ } __packed; struct ucan_ctl_cmd_set_bittiming { __le32 tq; /* Time quanta (TQ) in nanoseconds */ __le16 brp; /* TQ Prescaler */ __le16 sample_point; /* Sample point in tenths of a percent */ u8 prop_seg; /* Propagation segment in TQs */ u8 phase_seg1; /* Phase buffer segment 1 in TQs */ u8 phase_seg2; /* Phase buffer segment 2 in TQs */ u8 sjw; /* Synchronisation jump width in TQs */ } __packed; struct ucan_ctl_cmd_device_info { __le32 freq; /* Clock Frequency for tq generation */ u8 tx_fifo; /* Size of the transmission fifo */ u8 sjw_max; /* can_bittiming fields... */ u8 tseg1_min; u8 tseg1_max; u8 tseg2_min; u8 tseg2_max; __le16 brp_inc; __le32 brp_min; __le32 brp_max; /* ...can_bittiming fields */ __le16 ctrlmodes; /* supported control modes */ __le16 hwfilter; /* Number of HW filter banks */ __le16 rxmboxes; /* Number of receive Mailboxes */ } __packed; struct ucan_ctl_cmd_get_protocol_version { __le32 version; } __packed; union ucan_ctl_payload { /* Start the device * bmRequest == UCAN_COMMAND_START */ struct ucan_ctl_cmd_start cmd_start; /* Setup Bittiming * bmRequest == UCAN_COMMAND_SET_BITTIMING */ struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming; /* Get Device Information * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO */ struct ucan_ctl_cmd_device_info cmd_get_device_info; /* Get Protocol Version * bmRequest == UCAN_COMMAND_GET; * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION */ struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version; u8 fw_str[128]; } __packed; enum { UCAN_TX_COMPLETE_SUCCESS = BIT(0), }; /* Transmission Complete within ucan_message_in */ struct ucan_tx_complete_entry_t { u8 echo_index; u8 flags; } __packed __aligned(0x2); /* CAN Data message format within ucan_message_in/out */ struct ucan_can_msg { /* note DLC is computed by * msg.len - sizeof (msg.len) * - sizeof (msg.type) * - sizeof (msg.subtype) * - sizeof (msg.can_msg.id) */ __le32 id; union { u8 data[CAN_MAX_DLEN]; /* Data of CAN frames */ u8 dlc; /* RTR dlc */ }; } __packed; /* OUT Endpoint, outbound messages */ struct ucan_message_out { __le16 len; /* Length of the content including the header */ u8 type; /* UCAN_OUT_TX and friends */ u8 subtype; /* command sub type */ union { /* Transmit CAN frame * (type == UCAN_OUT_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0) * subtype stores the echo id */ struct ucan_can_msg can_msg; } msg; } __packed __aligned(0x4); /* IN Endpoint, inbound messages */ struct ucan_message_in { __le16 len; /* Length of the content including the header */ u8 type; /* UCAN_IN_RX and friends */ u8 subtype; /* command sub type */ union { /* CAN Frame received * (type == UCAN_IN_RX) * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0) */ struct ucan_can_msg can_msg; /* CAN transmission complete * (type == UCAN_IN_TX_COMPLETE) */ DECLARE_FLEX_ARRAY(struct ucan_tx_complete_entry_t, can_tx_complete_msg); } __aligned(0x4) msg; } __packed __aligned(0x4);
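/* Editor's illustration (not part of the driver): the "UCAN Message
 * Definitions" comment above specifies that IN-endpoint messages are
 * concatenated back to back, each one 4-byte aligned, i.e.
 * offset(m[n+1]) = (offset(m[n]) + m[n].len + 3) & ~3. The standalone
 * userspace sketch below walks such a buffer the same way
 * ucan_read_bulk_callback() does further down; the buffer contents are
 * made-up example data and a little-endian host is assumed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IN_HDR_SIZE 4 /* len (2 bytes) + type (1 byte) + subtype (1 byte) */

static void walk_in_messages(const uint8_t *buf, size_t buflen)
{
	size_t pos = 0;

	while (pos + IN_HDR_SIZE <= buflen) {
		uint16_t len;

		memcpy(&len, buf + pos, sizeof(len)); /* the __le16 len field */
		if (len < IN_HDR_SIZE || pos + len > buflen)
			break; /* malformed message, stop parsing */
		printf("msg at %zu: len=%u type=%u\n", pos, len, buf[pos + 2]);
		/* advance to the next 4-byte-aligned offset */
		pos = (pos + len + 3) & ~(size_t)3;
	}
}

int main(void)
{
	/* two fake messages: len=6 padded to 8, then len=5 padded to 8 */
	const uint8_t buf[] = {
		6, 0, 2, 0, 0xAA, 0xBB, 0x00, 0x00,
		5, 0, 1, 0, 0xCC, 0x00, 0x00, 0x00,
	};

	walk_in_messages(buf, sizeof(buf));
	return 0;
}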
/* Macros to calculate message lengths */ #define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg) #define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg) #define UCAN_IN_LEN(member) (UCAN_OUT_HDR_SIZE + sizeof(member)) struct ucan_priv; /* Context Information for transmission URBs */ struct ucan_urb_context { struct ucan_priv *up; bool allocated; }; /* Information reported by the USB device */ struct ucan_device_info { struct can_bittiming_const bittiming_const; u8 tx_fifo; }; /* Driver private data */ struct ucan_priv { /* must be the first member */ struct can_priv can; /* linux USB device structures */ struct usb_device *udev; struct net_device *netdev; /* lock for can->echo_skb (used around * can_put/get/free_echo_skb) */ spinlock_t echo_skb_lock; /* usb device information */ u8 intf_index; u8 in_ep_addr; u8 out_ep_addr; u16 in_ep_size; /* transmission and reception buffers */ struct usb_anchor rx_urbs; struct usb_anchor tx_urbs; union ucan_ctl_payload *ctl_msg_buffer; struct ucan_device_info device_info; /* transmission control information and locks */ spinlock_t context_lock; unsigned int available_tx_urbs; struct ucan_urb_context *context_array; }; static u8 ucan_can_cc_dlc2len(struct ucan_can_msg *msg, u16 len) { if (le32_to_cpu(msg->id) & CAN_RTR_FLAG) return can_cc_dlc2len(msg->dlc); else return can_cc_dlc2len(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id))); } static void ucan_release_context_array(struct ucan_priv *up) { if (!up->context_array) return; /* lock is not needed because the driver is currently opening or closing */ up->available_tx_urbs = 0; kfree(up->context_array); up->context_array = NULL; } static int ucan_alloc_context_array(struct ucan_priv *up) { int i; /* release contexts if any */ ucan_release_context_array(up); up->context_array = kcalloc(up->device_info.tx_fifo, sizeof(*up->context_array), GFP_KERNEL); if (!up->context_array) { netdev_err(up->netdev, "Not enough memory to allocate tx contexts\n"); return -ENOMEM; } for (i = 0; i < up->device_info.tx_fifo; i++) { up->context_array[i].allocated = false; up->context_array[i].up = up; } /* lock is not needed because the driver is currently opening */ up->available_tx_urbs = up->device_info.tx_fifo; return 0; } static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up) { int i; unsigned long flags; struct ucan_urb_context *ret = NULL; if (WARN_ON_ONCE(!up->context_array)) return NULL; /* execute context operation atomically */ spin_lock_irqsave(&up->context_lock, flags); for (i = 0; i < up->device_info.tx_fifo; i++) { if (!up->context_array[i].allocated) { /* update context */ ret = &up->context_array[i]; up->context_array[i].allocated = true; /* stop queue if necessary */ up->available_tx_urbs--; if (!up->available_tx_urbs) netif_stop_queue(up->netdev); break; } } spin_unlock_irqrestore(&up->context_lock, flags); return ret; } static bool ucan_release_context(struct ucan_priv *up, struct ucan_urb_context *ctx) { unsigned long flags; bool ret = false; if (WARN_ON_ONCE(!up->context_array)) return false; /* execute context operation atomically */ spin_lock_irqsave(&up->context_lock, flags); /* context was not allocated, maybe the device sent garbage */ if (ctx->allocated) { ctx->allocated = false; /* check if the queue needs to be woken */ if (!up->available_tx_urbs) netif_wake_queue(up->netdev); up->available_tx_urbs++; ret = true; } spin_unlock_irqrestore(&up->context_lock, flags); return ret; } static int ucan_ctrl_command_out(struct ucan_priv *up, u8 cmd, u16
subcmd, u16 datalen) { return usb_control_msg(up->udev, usb_sndctrlpipe(up->udev, 0), cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, subcmd, up->intf_index, up->ctl_msg_buffer, datalen, UCAN_USB_CTL_PIPE_TIMEOUT); } static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size) { int ret; ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0), UCAN_DEVICE_GET_FW_STRING, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, fw_str, size - 1, UCAN_USB_CTL_PIPE_TIMEOUT); if (ret > 0) fw_str[ret] = '\0'; else strscpy(fw_str, "unknown", size); } /* Parse the device information structure reported by the device and * setup private variables accordingly */ static void ucan_parse_device_info(struct ucan_priv *up, struct ucan_ctl_cmd_device_info *device_info) { struct can_bittiming_const *bittiming = &up->device_info.bittiming_const; u16 ctrlmodes; /* store the data */ up->can.clock.freq = le32_to_cpu(device_info->freq); up->device_info.tx_fifo = device_info->tx_fifo; strcpy(bittiming->name, "ucan"); bittiming->tseg1_min = device_info->tseg1_min; bittiming->tseg1_max = device_info->tseg1_max; bittiming->tseg2_min = device_info->tseg2_min; bittiming->tseg2_max = device_info->tseg2_max; bittiming->sjw_max = device_info->sjw_max; bittiming->brp_min = le32_to_cpu(device_info->brp_min); bittiming->brp_max = le32_to_cpu(device_info->brp_max); bittiming->brp_inc = le16_to_cpu(device_info->brp_inc); ctrlmodes = le16_to_cpu(device_info->ctrlmodes); up->can.ctrlmode_supported = 0; if (ctrlmodes & UCAN_MODE_LOOPBACK) up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK; if (ctrlmodes & UCAN_MODE_SILENT) up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; if (ctrlmodes & UCAN_MODE_3_SAMPLES) up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; if (ctrlmodes & UCAN_MODE_ONE_SHOT) up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; if (ctrlmodes & UCAN_MODE_BERR_REPORT) up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING; } /* Handle a CAN error frame that we have received from the device. * Returns true if the can state has changed. 
*/ static bool ucan_handle_error_frame(struct ucan_priv *up, struct ucan_message_in *m, canid_t canid) { enum can_state new_state = up->can.state; struct net_device_stats *net_stats = &up->netdev->stats; struct can_device_stats *can_stats = &up->can.can_stats; if (canid & CAN_ERR_LOSTARB) can_stats->arbitration_lost++; if (canid & CAN_ERR_BUSERROR) can_stats->bus_error++; if (canid & CAN_ERR_ACK) net_stats->tx_errors++; if (canid & CAN_ERR_BUSOFF) new_state = CAN_STATE_BUS_OFF; /* controller problems, details in data[1] */ if (canid & CAN_ERR_CRTL) { u8 d1 = m->msg.can_msg.data[1]; if (d1 & CAN_ERR_CRTL_RX_OVERFLOW) net_stats->rx_over_errors++; /* controller state bits: if multiple are set the worst wins */ if (d1 & CAN_ERR_CRTL_ACTIVE) new_state = CAN_STATE_ERROR_ACTIVE; if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING)) new_state = CAN_STATE_ERROR_WARNING; if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE)) new_state = CAN_STATE_ERROR_PASSIVE; } /* protocol error, details in data[2] */ if (canid & CAN_ERR_PROT) { u8 d2 = m->msg.can_msg.data[2]; if (d2 & CAN_ERR_PROT_TX) net_stats->tx_errors++; else net_stats->rx_errors++; } /* no state change - we are done */ if (up->can.state == new_state) return false; /* we switched into a better state */ if (up->can.state > new_state) { up->can.state = new_state; return true; } /* we switched into a worse state */ up->can.state = new_state; switch (new_state) { case CAN_STATE_BUS_OFF: can_stats->bus_off++; can_bus_off(up->netdev); break; case CAN_STATE_ERROR_PASSIVE: can_stats->error_passive++; break; case CAN_STATE_ERROR_WARNING: can_stats->error_warning++; break; default: break; } return true; } /* Callback on reception of a can frame via the IN endpoint * * This function allocates an skb and transfers it to the Linux * network stack */ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m) { int len; canid_t canid; struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &up->netdev->stats; /* get the contents of the length field */ len = le16_to_cpu(m->len); /* check sanity */ if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) { netdev_warn(up->netdev, "invalid input message len: %d\n", len); return; } /* handle error frames */ canid = le32_to_cpu(m->msg.can_msg.id); if (canid & CAN_ERR_FLAG) { bool busstate_changed = ucan_handle_error_frame(up, m, canid); /* if berr-reporting is off only state changes get through */ if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && !busstate_changed) return; } else { canid_t canid_mask; /* compute the mask for canid */ canid_mask = CAN_RTR_FLAG; if (canid & CAN_EFF_FLAG) canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG; else canid_mask |= CAN_SFF_MASK; if (canid & ~canid_mask) netdev_warn(up->netdev, "unexpected bits set (canid %x, mask %x)", canid, canid_mask); canid &= canid_mask; } /* allocate skb */ skb = alloc_can_skb(up->netdev, &cf); if (!skb) return; /* fill the can frame */ cf->can_id = canid; /* compute DLC taking RTR_FLAG into account */ cf->len = ucan_can_cc_dlc2len(&m->msg.can_msg, len); /* copy the payload of non RTR frames */ if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG)) memcpy(cf->data, m->msg.can_msg.data, cf->len); /* don't count error frames as real packets */ if (!(cf->can_id & CAN_ERR_FLAG)) { stats->rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; } /* pass it to Linux */ netif_rx(skb); } /* callback indicating completed transmission */ static void ucan_tx_complete_msg(struct
ucan_priv *up, struct ucan_message_in *m) { unsigned long flags; u16 count, i; u8 echo_index; u16 len = le16_to_cpu(m->len); struct ucan_urb_context *context; if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) { netdev_err(up->netdev, "invalid tx complete length\n"); return; } count = (len - UCAN_IN_HDR_SIZE) / 2; for (i = 0; i < count; i++) { /* we did not submit such echo ids */ echo_index = m->msg.can_tx_complete_msg[i].echo_index; if (echo_index >= up->device_info.tx_fifo) { up->netdev->stats.tx_errors++; netdev_err(up->netdev, "invalid echo_index %d received\n", echo_index); continue; } /* gather information from the context */ context = &up->context_array[echo_index]; /* Release context and restart queue if necessary. * Also check if the context was allocated */ if (!ucan_release_context(up, context)) continue; spin_lock_irqsave(&up->echo_skb_lock, flags); if (m->msg.can_tx_complete_msg[i].flags & UCAN_TX_COMPLETE_SUCCESS) { /* update statistics */ up->netdev->stats.tx_packets++; up->netdev->stats.tx_bytes += can_get_echo_skb(up->netdev, echo_index, NULL); } else { up->netdev->stats.tx_dropped++; can_free_echo_skb(up->netdev, echo_index, NULL); } spin_unlock_irqrestore(&up->echo_skb_lock, flags); } } /* callback on reception of a USB message */ static void ucan_read_bulk_callback(struct urb *urb) { int ret; int pos; struct ucan_priv *up = urb->context; struct net_device *netdev = up->netdev; struct ucan_message_in *m; /* the device is not up and the driver should not receive any * data on the bulk in pipe */ if (WARN_ON(!up->context_array)) { usb_free_coherent(up->udev, up->in_ep_size, urb->transfer_buffer, urb->transfer_dma); return; } /* check URB status */ switch (urb->status) { case 0: break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: case -ETIME: /* urb is not resubmitted -> free dma data */ usb_free_coherent(up->udev, up->in_ep_size, urb->transfer_buffer, urb->transfer_dma); netdev_dbg(up->netdev, "not resubmitting urb; status: %d\n", urb->status); return; default: goto resubmit; } /* sanity check */ if (!netif_device_present(netdev)) return; /* iterate over input */ pos = 0; while (pos < urb->actual_length) { int len; /* check sanity (length of header) */ if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) { netdev_warn(up->netdev, "invalid message (short; no hdr; l:%d)\n", urb->actual_length); goto resubmit; } /* setup the message address */ m = (struct ucan_message_in *) ((u8 *)urb->transfer_buffer + pos); len = le16_to_cpu(m->len); /* check sanity (length of content) */ if (urb->actual_length - pos < len) { netdev_warn(up->netdev, "invalid message (short; no data; l:%d)\n", urb->actual_length); print_hex_dump(KERN_WARNING, "raw data: ", DUMP_PREFIX_ADDRESS, 16, 1, urb->transfer_buffer, urb->actual_length, true); goto resubmit; } switch (m->type) { case UCAN_IN_RX: ucan_rx_can_msg(up, m); break; case UCAN_IN_TX_COMPLETE: ucan_tx_complete_msg(up, m); break; default: netdev_warn(up->netdev, "invalid message (type; t:%d)\n", m->type); break; } /* proceed to next message */ pos += len; /* align to 4 byte boundary */ pos = round_up(pos, 4); } resubmit: /* resubmit urb when done */ usb_fill_bulk_urb(urb, up->udev, usb_rcvbulkpipe(up->udev, up->in_ep_addr), urb->transfer_buffer, up->in_ep_size, ucan_read_bulk_callback, up); usb_anchor_urb(urb, &up->rx_urbs); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { netdev_err(up->netdev, "failed resubmitting read bulk urb: %d\n", ret); usb_unanchor_urb(urb); usb_free_coherent(up->udev, up->in_ep_size, urb->transfer_buffer, 
urb->transfer_dma); if (ret == -ENODEV) netif_device_detach(netdev); } } /* callback after transmission of a USB message */ static void ucan_write_bulk_callback(struct urb *urb) { unsigned long flags; struct ucan_priv *up; struct ucan_urb_context *context = urb->context; /* get the urb context */ if (WARN_ON_ONCE(!context)) return; /* free up our allocated buffer */ usb_free_coherent(urb->dev, sizeof(struct ucan_message_out), urb->transfer_buffer, urb->transfer_dma); up = context->up; if (WARN_ON_ONCE(!up)) return; /* sanity check */ if (!netif_device_present(up->netdev)) return; /* transmission failed (USB - the device will not send a TX complete) */ if (urb->status) { netdev_warn(up->netdev, "failed to transmit USB message to device: %d\n", urb->status); /* update counters and clean up */ spin_lock_irqsave(&up->echo_skb_lock, flags); can_free_echo_skb(up->netdev, context - up->context_array, NULL); spin_unlock_irqrestore(&up->echo_skb_lock, flags); up->netdev->stats.tx_dropped++; /* release context and restart the queue if necessary */ if (!ucan_release_context(up, context)) netdev_err(up->netdev, "urb failed, failed to release context\n"); } } static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs) { int i; for (i = 0; i < UCAN_MAX_RX_URBS; i++) { if (urbs[i]) { usb_unanchor_urb(urbs[i]); usb_free_coherent(up->udev, up->in_ep_size, urbs[i]->transfer_buffer, urbs[i]->transfer_dma); usb_free_urb(urbs[i]); } } memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS); } static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up, struct urb **urbs) { int i; memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS); for (i = 0; i < UCAN_MAX_RX_URBS; i++) { void *buf; urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (!urbs[i]) goto err; buf = usb_alloc_coherent(up->udev, up->in_ep_size, GFP_KERNEL, &urbs[i]->transfer_dma); if (!buf) { /* cleanup this urb */ usb_free_urb(urbs[i]); urbs[i] = NULL; goto err; } usb_fill_bulk_urb(urbs[i], up->udev, usb_rcvbulkpipe(up->udev, up->in_ep_addr), buf, up->in_ep_size, ucan_read_bulk_callback, up); urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urbs[i], &up->rx_urbs); } return 0; err: /* cleanup other unsubmitted urbs */ ucan_cleanup_rx_urbs(up, urbs); return -ENOMEM; } /* Submits rx urbs with the semantic: either submit all, or clean up * everything. In case of errors, submitted urbs are killed and all urbs in * the array are freed. In case of no errors, every entry in the urb * array is set to NULL. */ static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs) { int i, ret; /* Iterate over all urbs to submit. On success remove the urb * from the list.
*/ for (i = 0; i < UCAN_MAX_RX_URBS; i++) { ret = usb_submit_urb(urbs[i], GFP_KERNEL); if (ret) { netdev_err(up->netdev, "could not submit urb; code: %d\n", ret); goto err; } /* Anchor URB and drop reference, USB core will take * care of freeing it */ usb_free_urb(urbs[i]); urbs[i] = NULL; } return 0; err: /* Cleanup unsubmitted urbs */ ucan_cleanup_rx_urbs(up, urbs); /* Kill urbs that are already submitted */ usb_kill_anchored_urbs(&up->rx_urbs); return ret; } /* Open the network device */ static int ucan_open(struct net_device *netdev) { int ret, ret_cleanup; u16 ctrlmode; struct urb *urbs[UCAN_MAX_RX_URBS]; struct ucan_priv *up = netdev_priv(netdev); ret = ucan_alloc_context_array(up); if (ret) return ret; /* Allocate and prepare IN URBS - allocated and anchored * urbs are stored in urbs[] for cleanup */ ret = ucan_prepare_and_anchor_rx_urbs(up, urbs); if (ret) goto err_contexts; /* Check the control mode */ ctrlmode = 0; if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) ctrlmode |= UCAN_MODE_LOOPBACK; if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ctrlmode |= UCAN_MODE_SILENT; if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ctrlmode |= UCAN_MODE_3_SAMPLES; if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) ctrlmode |= UCAN_MODE_ONE_SHOT; /* Enable this in any case - filtering is done within the * receive path */ ctrlmode |= UCAN_MODE_BERR_REPORT; up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode); /* Driver is ready to receive data - start the USB device */ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2); if (ret < 0) { netdev_err(up->netdev, "could not start device, code: %d\n", ret); goto err_reset; } /* Call CAN layer open */ ret = open_candev(netdev); if (ret) goto err_stop; /* Driver is ready to receive data. Submit RX URBS */ ret = ucan_submit_rx_urbs(up, urbs); if (ret) goto err_stop; up->can.state = CAN_STATE_ERROR_ACTIVE; /* Start the network queue */ netif_start_queue(netdev); return 0; err_stop: /* The device has already started, stop it */ ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0); if (ret_cleanup < 0) netdev_err(up->netdev, "could not stop device, code: %d\n", ret_cleanup); err_reset: /* The device might have received data, reset it for * a consistent state */ ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); if (ret_cleanup < 0) netdev_err(up->netdev, "could not reset device, code: %d\n", ret_cleanup); /* clean up unsubmitted urbs */ ucan_cleanup_rx_urbs(up, urbs); err_contexts: ucan_release_context_array(up); return ret; } static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up, struct ucan_urb_context *context, struct can_frame *cf, u8 echo_index) { int mlen; struct urb *urb; struct ucan_message_out *m; /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { netdev_err(up->netdev, "no memory left for URBs\n"); return NULL; } m = usb_alloc_coherent(up->udev, sizeof(struct ucan_message_out), GFP_ATOMIC, &urb->transfer_dma); if (!m) { netdev_err(up->netdev, "no memory left for USB buffer\n"); usb_free_urb(urb); return NULL; } /* build the USB message */ m->type = UCAN_OUT_TX; m->msg.can_msg.id = cpu_to_le32(cf->can_id); if (cf->can_id & CAN_RTR_FLAG) { mlen = UCAN_OUT_HDR_SIZE + offsetof(struct ucan_can_msg, dlc) + sizeof(m->msg.can_msg.dlc); m->msg.can_msg.dlc = cf->len; } else { mlen = UCAN_OUT_HDR_SIZE + sizeof(m->msg.can_msg.id) + cf->len; memcpy(m->msg.can_msg.data, cf->data, cf->len); } m->len = cpu_to_le16(mlen); m->subtype = echo_index; /* build the urb */
usb_fill_bulk_urb(urb, up->udev, usb_sndbulkpipe(up->udev, up->out_ep_addr), m, mlen, ucan_write_bulk_callback, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return urb; } static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb) { usb_free_coherent(up->udev, sizeof(struct ucan_message_out), urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } /* callback when Linux needs to send a can frame */ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, struct net_device *netdev) { unsigned long flags; int ret; u8 echo_index; struct urb *urb; struct ucan_urb_context *context; struct ucan_priv *up = netdev_priv(netdev); struct can_frame *cf = (struct can_frame *)skb->data; /* check skb */ if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* allocate a context and slow down tx path, if fifo state is low */ context = ucan_alloc_context(up); if (WARN_ON_ONCE(!context)) return NETDEV_TX_BUSY; echo_index = context - up->context_array; /* prepare urb for transmission */ urb = ucan_prepare_tx_urb(up, context, cf, echo_index); if (!urb) goto drop; /* put the skb on can loopback stack */ spin_lock_irqsave(&up->echo_skb_lock, flags); can_put_echo_skb(skb, up->netdev, echo_index, 0); spin_unlock_irqrestore(&up->echo_skb_lock, flags); /* transmit it */ usb_anchor_urb(urb, &up->tx_urbs); ret = usb_submit_urb(urb, GFP_ATOMIC); /* cleanup urb */ if (ret) { /* on error, clean up */ usb_unanchor_urb(urb); ucan_clean_up_tx_urb(up, urb); if (!ucan_release_context(up, context)) netdev_err(up->netdev, "xmit err: failed to release context\n"); /* remove the skb from the echo stack - this also * frees the skb */ spin_lock_irqsave(&up->echo_skb_lock, flags); can_free_echo_skb(up->netdev, echo_index, NULL); spin_unlock_irqrestore(&up->echo_skb_lock, flags); if (ret == -ENODEV) { netif_device_detach(up->netdev); } else { netdev_warn(up->netdev, "xmit err: failed to submit urb %d\n", ret); up->netdev->stats.tx_dropped++; } return NETDEV_TX_OK; } netif_trans_update(netdev); /* release ref, as we do not need the urb anymore */ usb_free_urb(urb); return NETDEV_TX_OK; drop: if (!ucan_release_context(up, context)) netdev_err(up->netdev, "xmit drop: failed to release context\n"); dev_kfree_skb(skb); up->netdev->stats.tx_dropped++; return NETDEV_TX_OK; } /* Device goes down * * Clean up used resources */ static int ucan_close(struct net_device *netdev) { int ret; struct ucan_priv *up = netdev_priv(netdev); up->can.state = CAN_STATE_STOPPED; /* stop sending data */ usb_kill_anchored_urbs(&up->tx_urbs); /* stop receiving data */ usb_kill_anchored_urbs(&up->rx_urbs); /* stop and reset can device */ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0); if (ret < 0) netdev_err(up->netdev, "could not stop device, code: %d\n", ret); ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); if (ret < 0) netdev_err(up->netdev, "could not reset device, code: %d\n", ret); netif_stop_queue(netdev); ucan_release_context_array(up); close_candev(up->netdev); return 0; } /* CAN driver callbacks */ static const struct net_device_ops ucan_netdev_ops = { .ndo_open = ucan_open, .ndo_stop = ucan_close, .ndo_start_xmit = ucan_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ucan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; /* Request to set bittiming * * This function generates a USB set bittiming message and transmits * it to the device */ static int ucan_set_bittiming(struct net_device *netdev) { int ret; struct ucan_priv *up = netdev_priv(netdev);
struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming; cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming; cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq); cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp); cmd_set_bittiming->sample_point = cpu_to_le16(up->can.bittiming.sample_point); cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg; cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1; cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2; cmd_set_bittiming->sjw = up->can.bittiming.sjw; ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0, sizeof(*cmd_set_bittiming)); return (ret < 0) ? ret : 0; } /* Restart the device to get it out of BUS-OFF state. * Called when the user runs "ip link set can1 type can restart". */ static int ucan_set_mode(struct net_device *netdev, enum can_mode mode) { int ret; unsigned long flags; struct ucan_priv *up = netdev_priv(netdev); switch (mode) { case CAN_MODE_START: netdev_dbg(up->netdev, "restarting device\n"); ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0); up->can.state = CAN_STATE_ERROR_ACTIVE; /* check if queue can be restarted, * up->available_tx_urbs must be protected by the * lock */ spin_lock_irqsave(&up->context_lock, flags); if (up->available_tx_urbs > 0) netif_wake_queue(up->netdev); spin_unlock_irqrestore(&up->context_lock, flags); return ret; default: return -EOPNOTSUPP; } } /* Probe the device, reset it and gather general device information */ static int ucan_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; int i; u32 protocol_version; struct usb_device *udev; struct net_device *netdev; struct usb_host_interface *iface_desc; struct ucan_priv *up; struct usb_endpoint_descriptor *ep; u16 in_ep_size; u16 out_ep_size; u8 in_ep_addr; u8 out_ep_addr; union ucan_ctl_payload *ctl_msg_buffer; udev = interface_to_usbdev(intf); /* Stage 1 - Interface Parsing * --------------------------- * * Identify the device USB interface descriptor and its * endpoints. Probing is aborted on errors.
*/ /* check if the interface is sane */ iface_desc = intf->cur_altsetting; if (!iface_desc) return -ENODEV; dev_info(&udev->dev, "%s: probing device on interface #%d\n", UCAN_DRIVER_NAME, iface_desc->desc.bInterfaceNumber); /* interface sanity check */ if (iface_desc->desc.bNumEndpoints != 2) { dev_err(&udev->dev, "%s: invalid EP count (%d)", UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints); goto err_firmware_needs_update; } /* check interface endpoints */ in_ep_addr = 0; out_ep_addr = 0; in_ep_size = 0; out_ep_size = 0; for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { ep = &iface_desc->endpoint[i].desc; if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) && ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { /* In Endpoint */ in_ep_addr = ep->bEndpointAddress; in_ep_addr &= USB_ENDPOINT_NUMBER_MASK; in_ep_size = le16_to_cpu(ep->wMaxPacketSize); } else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == 0) && ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { /* Out Endpoint */ out_ep_addr = ep->bEndpointAddress; out_ep_addr &= USB_ENDPOINT_NUMBER_MASK; out_ep_size = le16_to_cpu(ep->wMaxPacketSize); } } /* check if interface is sane */ if (!in_ep_addr || !out_ep_addr) { dev_err(&udev->dev, "%s: invalid endpoint configuration\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } if (in_ep_size < sizeof(struct ucan_message_in)) { dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } if (out_ep_size < sizeof(struct ucan_message_out)) { dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } /* Stage 2 - Device Identification * ------------------------------- * * The device interface seems to be a ucan device. Do further * compatibility checks. On error probing is aborted, on * success this stage leaves the ctl_msg_buffer with the * reported contents of a GET_INFO command (supported * bittimings, tx_fifo depth). This information is used in * Stage 3 for the final driver initialisation. 
*/ /* Prepare Memory for control transfers */ ctl_msg_buffer = devm_kzalloc(&udev->dev, sizeof(union ucan_ctl_payload), GFP_KERNEL); if (!ctl_msg_buffer) { dev_err(&udev->dev, "%s: failed to allocate control pipe memory\n", UCAN_DRIVER_NAME); return -ENOMEM; } /* get protocol version * * note: ucan_ctrl_command_* wrappers cannot be used yet * because `up` is initialised in Stage 3 */ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), UCAN_COMMAND_GET, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, UCAN_COMMAND_GET_PROTOCOL_VERSION, iface_desc->desc.bInterfaceNumber, ctl_msg_buffer, sizeof(union ucan_ctl_payload), UCAN_USB_CTL_PIPE_TIMEOUT); /* older firmware versions do not support this command - those * are not supported by this driver */ if (ret != 4) { dev_err(&udev->dev, "%s: could not read protocol version, ret=%d\n", UCAN_DRIVER_NAME, ret); if (ret >= 0) ret = -EINVAL; goto err_firmware_needs_update; } /* this driver currently supports protocol version 3 only */ protocol_version = le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version); if (protocol_version < UCAN_PROTOCOL_VERSION_MIN || protocol_version > UCAN_PROTOCOL_VERSION_MAX) { dev_err(&udev->dev, "%s: device protocol version %d is not supported\n", UCAN_DRIVER_NAME, protocol_version); goto err_firmware_needs_update; } /* request the device information and store it in ctl_msg_buffer * * note: ucan_ctrl_command_* wrappers cannot be used yet * because `up` is initialised in Stage 3 */ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), UCAN_COMMAND_GET, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, UCAN_COMMAND_GET_INFO, iface_desc->desc.bInterfaceNumber, ctl_msg_buffer, sizeof(ctl_msg_buffer->cmd_get_device_info), UCAN_USB_CTL_PIPE_TIMEOUT); if (ret < 0) { dev_err(&udev->dev, "%s: failed to retrieve device info\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) { dev_err(&udev->dev, "%s: device reported invalid device info\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) { dev_err(&udev->dev, "%s: device reported invalid tx-fifo size\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } /* Stage 3 - Driver Initialisation * ------------------------------- * * Register the device with Linux, prepare private structures and * reset the device.
*/ /* allocate driver resources */ netdev = alloc_candev(sizeof(struct ucan_priv), ctl_msg_buffer->cmd_get_device_info.tx_fifo); if (!netdev) { dev_err(&udev->dev, "%s: cannot allocate candev\n", UCAN_DRIVER_NAME); return -ENOMEM; } up = netdev_priv(netdev); /* initialize data */ up->udev = udev; up->netdev = netdev; up->intf_index = iface_desc->desc.bInterfaceNumber; up->in_ep_addr = in_ep_addr; up->out_ep_addr = out_ep_addr; up->in_ep_size = in_ep_size; up->ctl_msg_buffer = ctl_msg_buffer; up->context_array = NULL; up->available_tx_urbs = 0; up->can.state = CAN_STATE_STOPPED; up->can.bittiming_const = &up->device_info.bittiming_const; up->can.do_set_bittiming = ucan_set_bittiming; up->can.do_set_mode = &ucan_set_mode; spin_lock_init(&up->context_lock); spin_lock_init(&up->echo_skb_lock); netdev->netdev_ops = &ucan_netdev_ops; netdev->ethtool_ops = &ucan_ethtool_ops; usb_set_intfdata(intf, up); SET_NETDEV_DEV(netdev, &intf->dev); /* parse device information * the data retrieved in Stage 2 is still available in * up->ctl_msg_buffer */ ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info); /* device is compatible, reset it */ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); if (ret < 0) goto err_free_candev; init_usb_anchor(&up->rx_urbs); init_usb_anchor(&up->tx_urbs); up->can.state = CAN_STATE_STOPPED; /* register the device */ ret = register_candev(netdev); if (ret) goto err_free_candev; /* initialisation complete, log device info */ netdev_info(up->netdev, "registered device\n"); ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str, sizeof(up->ctl_msg_buffer->fw_str)); netdev_info(up->netdev, "firmware string: %s\n", up->ctl_msg_buffer->fw_str); /* success */ return 0; err_free_candev: free_candev(netdev); return ret; err_firmware_needs_update: dev_err(&udev->dev, "%s: probe failed; try to update the device firmware\n", UCAN_DRIVER_NAME); return -ENODEV; } /* disconnect the device */ static void ucan_disconnect(struct usb_interface *intf) { struct ucan_priv *up = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (up) { unregister_candev(up->netdev); free_candev(up->netdev); } } static struct usb_device_id ucan_table[] = { /* Mule (soldered onto compute modules) */ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)}, /* Seal (standalone USB stick) */ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ucan_table); /* driver callbacks */ static struct usb_driver ucan_driver = { .name = UCAN_DRIVER_NAME, .probe = ucan_probe, .disconnect = ucan_disconnect, .id_table = ucan_table, }; module_usb_driver(ucan_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Martin Elshuber <martin.elshuber@theobroma-systems.com>"); MODULE_AUTHOR("Jakob Unterwurzacher <jakob.unterwurzacher@theobroma-systems.com>"); MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices");
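To see the length arithmetic from ucan_prepare_tx_urb() and ucan_can_cc_dlc2len() in isolation: for data frames the payload length is implicit in the total message length, while RTR frames carry an explicit dlc byte and no payload. The following standalone userspace sketch (not part of the driver; the constants merely restate the struct layout above) round-trips the computation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 4 /* len (2 bytes) + type (1 byte) + subtype (1 byte) */
#define ID_SIZE  4 /* __le32 id */

/* total OUT-message length for a data frame carrying dlc payload bytes */
static uint16_t data_frame_len(uint8_t dlc)
{
	return HDR_SIZE + ID_SIZE + dlc;
}

/* inverse: recover the payload length from a data frame's len field */
static uint8_t data_frame_dlc(uint16_t len)
{
	return (uint8_t)(len - (HDR_SIZE + ID_SIZE));
}

int main(void)
{
	uint8_t dlc;

	for (dlc = 0; dlc <= 8; dlc++)
		assert(data_frame_dlc(data_frame_len(dlc)) == dlc);

	printf("8-byte data frame: len=%u\n", data_frame_len(8));
	/* an RTR frame replaces the payload with a single dlc byte */
	printf("RTR frame: len=%u\n", HDR_SIZE + ID_SIZE + 1);
	return 0;
}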
3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 
/* * Copyright (c) 2005 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/string.h> #include <linux/parser.h> #include <linux/random.h> #include <linux/jiffies.h> #include <linux/lockdep.h> #include <linux/inet.h> #include <rdma/ib_cache.h> #include <linux/atomic.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_tcq.h> #include <scsi/srp.h> #include <scsi/scsi_transport_srp.h> #include "ib_srp.h" #define DRV_NAME "ib_srp" #define PFX DRV_NAME ": " MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator"); MODULE_LICENSE("Dual BSD/GPL"); static unsigned int srp_sg_tablesize; static unsigned int cmd_sg_entries; static unsigned int indirect_sg_entries; static bool allow_ext_sg; static bool register_always = true; static bool never_register; static int topspin_workarounds = 1; module_param(srp_sg_tablesize, uint, 0444); MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries"); module_param(cmd_sg_entries, uint, 0444); MODULE_PARM_DESC(cmd_sg_entries, "Default number of gather/scatter entries in the SRP command (default is 12, max 255)"); module_param(indirect_sg_entries, uint, 0444); MODULE_PARM_DESC(indirect_sg_entries, "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")"); module_param(allow_ext_sg, bool, 0444); MODULE_PARM_DESC(allow_ext_sg, "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)"); module_param(topspin_workarounds, int, 0444); MODULE_PARM_DESC(topspin_workarounds, "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); module_param(register_always, bool, 0444); MODULE_PARM_DESC(register_always, "Use memory registration even for contiguous memory regions"); module_param(never_register, bool, 0444); MODULE_PARM_DESC(never_register, "Never register memory"); static const struct kernel_param_ops srp_tmo_ops; static int srp_reconnect_delay = 10; module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts"); static int srp_fast_io_fail_tmo = 15; module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fast_io_fail_tmo, "Number of seconds between the observation of a transport" " layer error and failing all I/O. \"off\" means that this" " functionality is disabled."); static int srp_dev_loss_tmo = 600; module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the SRP transport should" " insulate transport layer errors. After this time has been" " exceeded the SCSI host is removed. Should be" " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT) " if fast_io_fail_tmo has not been set. 
\"off\" means that" " this functionality is disabled."); static bool srp_use_imm_data = true; module_param_named(use_imm_data, srp_use_imm_data, bool, 0644); MODULE_PARM_DESC(use_imm_data, "Whether or not to request permission to use immediate data during SRP login."); static unsigned int srp_max_imm_data = 8 * 1024; module_param_named(max_imm_data, srp_max_imm_data, uint, 0644); MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size."); static unsigned ch_count; module_param(ch_count, uint, 0444); MODULE_PARM_DESC(ch_count, "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA."); static int srp_add_one(struct ib_device *device); static void srp_remove_one(struct ib_device *device, void *client_data); static void srp_rename_dev(struct ib_device *device, void *client_data); static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc); static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc, const char *opname); static int srp_ib_cm_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *event); static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event); static struct scsi_transport_template *ib_srp_transport_template; static struct workqueue_struct *srp_remove_wq; static struct ib_client srp_client = { .name = "srp", .add = srp_add_one, .remove = srp_remove_one, .rename = srp_rename_dev }; static struct ib_sa_client srp_sa_client; static int srp_tmo_get(char *buffer, const struct kernel_param *kp) { int tmo = *(int *)kp->arg; if (tmo >= 0) return sysfs_emit(buffer, "%d\n", tmo); else return sysfs_emit(buffer, "off\n"); } static int srp_tmo_set(const char *val, const struct kernel_param *kp) { int tmo, res; res = srp_parse_tmo(&tmo, val); if (res) goto out; if (kp->arg == &srp_reconnect_delay) res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, srp_dev_loss_tmo); else if (kp->arg == &srp_fast_io_fail_tmo) res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo); else res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo, tmo); if (res) goto out; *(int *)kp->arg = tmo; out: return res; } static const struct kernel_param_ops srp_tmo_ops = { .get = srp_tmo_get, .set = srp_tmo_set, }; static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) { return (struct srp_target_port *) host->hostdata; } static const char *srp_target_info(struct Scsi_Host *host) { return host_to_target(host)->target_name; } static int srp_target_is_topspin(struct srp_target_port *target) { static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d }; return topspin_workarounds && (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); } static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, gfp_t gfp_mask, enum dma_data_direction direction) { struct srp_iu *iu; iu = kmalloc(sizeof *iu, gfp_mask); if (!iu) goto out; iu->buf = kzalloc(size, gfp_mask); if (!iu->buf) goto out_free_iu; iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, direction); if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) goto out_free_buf; iu->size = size; iu->direction = direction; return iu; out_free_buf: kfree(iu->buf); out_free_iu: kfree(iu); out: return NULL; } static void 
srp_free_iu(struct srp_host *host, struct srp_iu *iu) { if (!iu) return; ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, iu->direction); kfree(iu->buf); kfree(iu); } static void srp_qp_event(struct ib_event *event, void *context) { pr_debug("QP event %s (%d)\n", ib_event_msg(event->event), event->event); } static int srp_init_ib_qp(struct srp_target_port *target, struct ib_qp *qp) { struct ib_qp_attr *attr; int ret; attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) return -ENOMEM; ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, target->srp_host->port, be16_to_cpu(target->ib_cm.pkey), &attr->pkey_index); if (ret) goto out; attr->qp_state = IB_QPS_INIT; attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE); attr->port_num = target->srp_host->port; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS | IB_QP_PORT); out: kfree(attr); return ret; } static int srp_new_ib_cm_id(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; struct ib_cm_id *new_cm_id; new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, srp_ib_cm_handler, ch); if (IS_ERR(new_cm_id)) return PTR_ERR(new_cm_id); if (ch->ib_cm.cm_id) ib_destroy_cm_id(ch->ib_cm.cm_id); ch->ib_cm.cm_id = new_cm_id; if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev, target->srp_host->port)) ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA; else ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB; ch->ib_cm.path.sgid = target->sgid; ch->ib_cm.path.dgid = target->ib_cm.orig_dgid; ch->ib_cm.path.pkey = target->ib_cm.pkey; ch->ib_cm.path.service_id = target->ib_cm.service_id; return 0; } static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; struct rdma_cm_id *new_cm_id; int ret; new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(new_cm_id)) { ret = PTR_ERR(new_cm_id); new_cm_id = NULL; goto out; } init_completion(&ch->done); ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ? &target->rdma_cm.src.sa : NULL, &target->rdma_cm.dst.sa, SRP_PATH_REC_TIMEOUT_MS); if (ret) { pr_err("No route available from %pISpsc to %pISpsc (%d)\n", &target->rdma_cm.src, &target->rdma_cm.dst, ret); goto out; } ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) goto out; ret = ch->status; if (ret) { pr_err("Resolving address %pISpsc failed (%d)\n", &target->rdma_cm.dst, ret); goto out; } swap(ch->rdma_cm.cm_id, new_cm_id); out: if (new_cm_id) rdma_destroy_id(new_cm_id); return ret; } static int srp_new_cm_id(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) : srp_new_ib_cm_id(ch); } /** * srp_destroy_fr_pool() - free the resources owned by a pool * @pool: Fast registration pool to be destroyed. */ static void srp_destroy_fr_pool(struct srp_fr_pool *pool) { int i; struct srp_fr_desc *d; if (!pool) return; for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { if (d->mr) ib_dereg_mr(d->mr); } kfree(pool); } /** * srp_create_fr_pool() - allocate and initialize a pool for fast registration * @device: IB device to allocate fast registration descriptors for. * @pd: Protection domain associated with the FR descriptors. * @pool_size: Number of descriptors to allocate. * @max_page_list_len: Maximum fast registration work request page list length. 
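 *
 * Illustrative usage sketch (the device, PD and sizes here are
 * placeholders; in this driver the real caller is srp_alloc_fr_pool()
 * below):
 *
 *	struct srp_fr_pool *pool;
 *
 *	pool = srp_create_fr_pool(ibdev, pd, 64, 512);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	srp_destroy_fr_pool(pool);
 *
 * Return: a pointer to the new pool on success or an ERR_PTR()-encoded
 * error, so callers must test the result with IS_ERR().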
*/ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, struct ib_pd *pd, int pool_size, int max_page_list_len) { struct srp_fr_pool *pool; struct srp_fr_desc *d; struct ib_mr *mr; int i, ret = -EINVAL; enum ib_mr_type mr_type; if (pool_size <= 0) goto err; ret = -ENOMEM; pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL); if (!pool) goto err; pool->size = pool_size; pool->max_page_list_len = max_page_list_len; spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->free_list); if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) mr_type = IB_MR_TYPE_SG_GAPS; else mr_type = IB_MR_TYPE_MEM_REG; for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { mr = ib_alloc_mr(pd, mr_type, max_page_list_len); if (IS_ERR(mr)) { ret = PTR_ERR(mr); if (ret == -ENOMEM) pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n", dev_name(&device->dev)); goto destroy_pool; } d->mr = mr; list_add_tail(&d->entry, &pool->free_list); } out: return pool; destroy_pool: srp_destroy_fr_pool(pool); err: pool = ERR_PTR(ret); goto out; } /** * srp_fr_pool_get() - obtain a descriptor suitable for fast registration * @pool: Pool to obtain descriptor from. */ static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool) { struct srp_fr_desc *d = NULL; unsigned long flags; spin_lock_irqsave(&pool->lock, flags); if (!list_empty(&pool->free_list)) { d = list_first_entry(&pool->free_list, typeof(*d), entry); list_del(&d->entry); } spin_unlock_irqrestore(&pool->lock, flags); return d; } /** * srp_fr_pool_put() - put an FR descriptor back in the free list * @pool: Pool the descriptor was allocated from. * @desc: Pointer to an array of fast registration descriptor pointers. * @n: Number of descriptors to put back. * * Note: The caller must already have queued an invalidation request for * desc->mr->rkey before calling this function. */ static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc, int n) { unsigned long flags; int i; spin_lock_irqsave(&pool->lock, flags); for (i = 0; i < n; i++) list_add(&desc[i]->entry, &pool->free_list); spin_unlock_irqrestore(&pool->lock, flags); } static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target) { struct srp_device *dev = target->srp_host->srp_dev; return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size, dev->max_pages_per_mr); } /** * srp_destroy_qp() - destroy an RDMA queue pair * @ch: SRP RDMA channel. * * Drain the qp before destroying it. This avoids that the receive * completion handler can access the queue pair while it is * being destroyed. 
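 *
 * The teardown order below matters: pending send completions are first
 * reaped under ch->lock with ib_process_cq_direct(), then ib_drain_qp()
 * flushes whatever work requests are still queued, and only then is the
 * QP destroyed with ib_destroy_qp().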
*/ static void srp_destroy_qp(struct srp_rdma_ch *ch) { spin_lock_irq(&ch->lock); ib_process_cq_direct(ch->send_cq, -1); spin_unlock_irq(&ch->lock); ib_drain_qp(ch->qp); ib_destroy_qp(ch->qp); } static int srp_create_ch_ib(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; const struct ib_device_attr *attr = &dev->dev->attrs; struct ib_qp_init_attr *init_attr; struct ib_cq *recv_cq, *send_cq; struct ib_qp *qp; struct srp_fr_pool *fr_pool = NULL; const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2; int ret; init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); if (!init_attr) return -ENOMEM; /* queue_size + 1 for ib_drain_rq() */ recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, ch->comp_vector, IB_POLL_SOFTIRQ); if (IS_ERR(recv_cq)) { ret = PTR_ERR(recv_cq); goto err; } send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, ch->comp_vector, IB_POLL_DIRECT); if (IS_ERR(send_cq)) { ret = PTR_ERR(send_cq); goto err_recv_cq; } init_attr->event_handler = srp_qp_event; init_attr->cap.max_send_wr = m * target->queue_size; init_attr->cap.max_recv_wr = target->queue_size + 1; init_attr->cap.max_recv_sge = 1; init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge); init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; init_attr->qp_type = IB_QPT_RC; init_attr->send_cq = send_cq; init_attr->recv_cq = recv_cq; ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U); if (target->using_rdma_cm) { ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr); qp = ch->rdma_cm.cm_id->qp; } else { qp = ib_create_qp(dev->pd, init_attr); if (!IS_ERR(qp)) { ret = srp_init_ib_qp(target, qp); if (ret) ib_destroy_qp(qp); } else { ret = PTR_ERR(qp); } } if (ret) { pr_err("QP creation failed for dev %s: %d\n", dev_name(&dev->dev->dev), ret); goto err_send_cq; } if (dev->use_fast_reg) { fr_pool = srp_alloc_fr_pool(target); if (IS_ERR(fr_pool)) { ret = PTR_ERR(fr_pool); shost_printk(KERN_WARNING, target->scsi_host, PFX "FR pool allocation failed (%d)\n", ret); goto err_qp; } } if (ch->qp) srp_destroy_qp(ch); if (ch->recv_cq) ib_free_cq(ch->recv_cq); if (ch->send_cq) ib_free_cq(ch->send_cq); ch->qp = qp; ch->recv_cq = recv_cq; ch->send_cq = send_cq; if (dev->use_fast_reg) { if (ch->fr_pool) srp_destroy_fr_pool(ch->fr_pool); ch->fr_pool = fr_pool; } kfree(init_attr); return 0; err_qp: if (target->using_rdma_cm) rdma_destroy_qp(ch->rdma_cm.cm_id); else ib_destroy_qp(qp); err_send_cq: ib_free_cq(send_cq); err_recv_cq: ib_free_cq(recv_cq); err: kfree(init_attr); return ret; } /* * Note: this function may be called without srp_alloc_iu_bufs() having been * invoked. Hence the ch->[rt]x_ring checks. */ static void srp_free_ch_ib(struct srp_target_port *target, struct srp_rdma_ch *ch) { struct srp_device *dev = target->srp_host->srp_dev; int i; if (!ch->target) return; if (target->using_rdma_cm) { if (ch->rdma_cm.cm_id) { rdma_destroy_id(ch->rdma_cm.cm_id); ch->rdma_cm.cm_id = NULL; } } else { if (ch->ib_cm.cm_id) { ib_destroy_cm_id(ch->ib_cm.cm_id); ch->ib_cm.cm_id = NULL; } } /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */ if (!ch->qp) return; if (dev->use_fast_reg) { if (ch->fr_pool) srp_destroy_fr_pool(ch->fr_pool); } srp_destroy_qp(ch); ib_free_cq(ch->send_cq); ib_free_cq(ch->recv_cq); /* * Avoid that the SCSI error handler tries to use this channel after * it has been freed. The SCSI error handler can namely continue * trying to perform recovery actions after scsi_remove_host() * returned. 
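 *
 * Clearing ch->target below also serves as the "channel already freed"
 * marker: srp_free_ch_ib() returns early when ch->target is NULL, so a
 * repeated invocation is a harmless no-op.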
*/ ch->target = NULL; ch->qp = NULL; ch->send_cq = ch->recv_cq = NULL; if (ch->rx_ring) { for (i = 0; i < target->queue_size; ++i) srp_free_iu(target->srp_host, ch->rx_ring[i]); kfree(ch->rx_ring); ch->rx_ring = NULL; } if (ch->tx_ring) { for (i = 0; i < target->queue_size; ++i) srp_free_iu(target->srp_host, ch->tx_ring[i]); kfree(ch->tx_ring); ch->tx_ring = NULL; } } static void srp_path_rec_completion(int status, struct sa_path_rec *pathrec, unsigned int num_paths, void *ch_ptr) { struct srp_rdma_ch *ch = ch_ptr; struct srp_target_port *target = ch->target; ch->status = status; if (status) shost_printk(KERN_ERR, target->scsi_host, PFX "Got failed path rec status %d\n", status); else ch->ib_cm.path = *pathrec; complete(&ch->done); } static int srp_ib_lookup_path(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; int ret; ch->ib_cm.path.numb_path = 1; init_completion(&ch->done); ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client, target->srp_host->srp_dev->dev, target->srp_host->port, &ch->ib_cm.path, IB_SA_PATH_REC_SERVICE_ID | IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_NUMB_PATH | IB_SA_PATH_REC_PKEY, SRP_PATH_REC_TIMEOUT_MS, GFP_KERNEL, srp_path_rec_completion, ch, &ch->ib_cm.path_query); if (ch->ib_cm.path_query_id < 0) return ch->ib_cm.path_query_id; ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) return ret; if (ch->status < 0) shost_printk(KERN_WARNING, target->scsi_host, PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n", ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw, be16_to_cpu(target->ib_cm.pkey), be64_to_cpu(target->ib_cm.service_id)); return ch->status; } static int srp_rdma_lookup_path(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; int ret; init_completion(&ch->done); ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS); if (ret) return ret; wait_for_completion_interruptible(&ch->done); if (ch->status != 0) shost_printk(KERN_WARNING, target->scsi_host, PFX "Path resolution failed\n"); return ch->status; } static int srp_lookup_path(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; return target->using_rdma_cm ? srp_rdma_lookup_path(ch) : srp_ib_lookup_path(ch); } static u8 srp_get_subnet_timeout(struct srp_host *host) { struct ib_port_attr attr; int ret; u8 subnet_timeout = 18; ret = ib_query_port(host->srp_dev->dev, host->port, &attr); if (ret == 0) subnet_timeout = attr.subnet_timeout; if (unlikely(subnet_timeout < 15)) pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n", dev_name(&host->srp_dev->dev->dev), subnet_timeout); return subnet_timeout; } static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len, bool multich) { struct srp_target_port *target = ch->target; struct { struct rdma_conn_param rdma_param; struct srp_login_req_rdma rdma_req; struct ib_cm_req_param ib_param; struct srp_login_req ib_req; } *req = NULL; char *ipi, *tpi; int status; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; req->ib_param.flow_control = 1; req->ib_param.retry_count = target->tl_retry_count; /* * Pick some arbitrary defaults here; we could make these * module parameters if anyone cared about setting them. 
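 *
 * For reference (values set just below): responder_resources = 4 is,
 * roughly, the number of outstanding RDMA reads we are willing to
 * service as responder; rnr_retry_count = 7 is the IB encoding for
 * "retry indefinitely" on receiver-not-ready NAKs; and
 * max_cm_retries = 15 bounds CM REQ retransmissions.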
*/ req->ib_param.responder_resources = 4; req->ib_param.rnr_retry_count = 7; req->ib_param.max_cm_retries = 15; req->ib_req.opcode = SRP_LOGIN_REQ; req->ib_req.tag = 0; req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len); req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT); req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI : SRP_MULTICHAN_SINGLE); if (srp_use_imm_data) { req->ib_req.req_flags |= SRP_IMMED_REQUESTED; req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET); } if (target->using_rdma_cm) { req->rdma_param.flow_control = req->ib_param.flow_control; req->rdma_param.responder_resources = req->ib_param.responder_resources; req->rdma_param.initiator_depth = req->ib_param.initiator_depth; req->rdma_param.retry_count = req->ib_param.retry_count; req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count; req->rdma_param.private_data = &req->rdma_req; req->rdma_param.private_data_len = sizeof(req->rdma_req); req->rdma_req.opcode = req->ib_req.opcode; req->rdma_req.tag = req->ib_req.tag; req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len; req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt; req->rdma_req.req_flags = req->ib_req.req_flags; req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset; ipi = req->rdma_req.initiator_port_id; tpi = req->rdma_req.target_port_id; } else { u8 subnet_timeout; subnet_timeout = srp_get_subnet_timeout(target->srp_host); req->ib_param.primary_path = &ch->ib_cm.path; req->ib_param.alternate_path = NULL; req->ib_param.service_id = target->ib_cm.service_id; get_random_bytes(&req->ib_param.starting_psn, 4); req->ib_param.starting_psn &= 0xffffff; req->ib_param.qp_num = ch->qp->qp_num; req->ib_param.qp_type = ch->qp->qp_type; req->ib_param.local_cm_response_timeout = subnet_timeout + 2; req->ib_param.remote_cm_response_timeout = subnet_timeout + 2; req->ib_param.private_data = &req->ib_req; req->ib_param.private_data_len = sizeof(req->ib_req); ipi = req->ib_req.initiator_port_id; tpi = req->ib_req.target_port_id; } /* * In the published SRP specification (draft rev. 16a), the * port identifier format is 8 bytes of ID extension followed * by 8 bytes of GUID. Older drafts put the two halves in the * opposite order, so that the GUID comes first. * * Targets conforming to these obsolete drafts can be * recognized by the I/O Class they report. */ if (target->io_class == SRP_REV10_IB_IO_CLASS) { memcpy(ipi, &target->sgid.global.interface_id, 8); memcpy(ipi + 8, &target->initiator_ext, 8); memcpy(tpi, &target->ioc_guid, 8); memcpy(tpi + 8, &target->id_ext, 8); } else { memcpy(ipi, &target->initiator_ext, 8); memcpy(ipi + 8, &target->sgid.global.interface_id, 8); memcpy(tpi, &target->id_ext, 8); memcpy(tpi + 8, &target->ioc_guid, 8); } /* * Topspin/Cisco SRP targets will reject our login unless we * zero out the first 8 bytes of our initiator port ID and set * the second 8 bytes to the local node GUID. 
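 *
 * Resulting initiator port ID layout (16 bytes, sketch):
 *
 *	normal:		ipi[0..7]  = initiator extension,
 *			ipi[8..15] = port GUID	(halves swapped for
 *						 SRP_REV10_IB_IO_CLASS targets)
 *	Topspin/Cisco:	ipi[0..7]  = zeroes,
 *			ipi[8..15] = local node GUID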
*/ if (srp_target_is_topspin(target)) { shost_printk(KERN_DEBUG, target->scsi_host, PFX "Topspin/Cisco initiator port ID workaround " "activated for target GUID %016llx\n", be64_to_cpu(target->ioc_guid)); memset(ipi, 0, 8); memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8); } if (target->using_rdma_cm) status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param); else status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param); kfree(req); return status; } static bool srp_queue_remove_work(struct srp_target_port *target) { bool changed = false; spin_lock_irq(&target->lock); if (target->state != SRP_TARGET_REMOVED) { target->state = SRP_TARGET_REMOVED; changed = true; } spin_unlock_irq(&target->lock); if (changed) queue_work(srp_remove_wq, &target->remove_work); return changed; } static void srp_disconnect_target(struct srp_target_port *target) { struct srp_rdma_ch *ch; int i, ret; /* XXX should send SRP_I_LOGOUT request */ for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; ch->connected = false; ret = 0; if (target->using_rdma_cm) { if (ch->rdma_cm.cm_id) rdma_disconnect(ch->rdma_cm.cm_id); } else { if (ch->ib_cm.cm_id) ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0); } if (ret < 0) { shost_printk(KERN_DEBUG, target->scsi_host, PFX "Sending CM DREQ failed\n"); } } } static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct srp_target_port *target = host_to_target(shost); struct srp_device *dev = target->srp_host->srp_dev; struct ib_device *ibdev = dev->dev; struct srp_request *req = scsi_cmd_priv(cmd); kfree(req->fr_list); if (req->indirect_dma_addr) { ib_dma_unmap_single(ibdev, req->indirect_dma_addr, target->indirect_size, DMA_TO_DEVICE); } kfree(req->indirect_desc); return 0; } static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct srp_target_port *target = host_to_target(shost); struct srp_device *srp_dev = target->srp_host->srp_dev; struct ib_device *ibdev = srp_dev->dev; struct srp_request *req = scsi_cmd_priv(cmd); dma_addr_t dma_addr; int ret = -ENOMEM; if (srp_dev->use_fast_reg) { req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *), GFP_KERNEL); if (!req->fr_list) goto out; } req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); if (!req->indirect_desc) goto out; dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, target->indirect_size, DMA_TO_DEVICE); if (ib_dma_mapping_error(ibdev, dma_addr)) { srp_exit_cmd_priv(shost, cmd); goto out; } req->indirect_dma_addr = dma_addr; ret = 0; out: return ret; } /** * srp_del_scsi_host_attr() - Remove attributes defined in the host template. * @shost: SCSI host whose attributes to remove from sysfs. * * Note: Any attributes defined in the host template and that did not exist * before invocation of this function will be ignored. 
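 *
 * (This relies on device_remove_file() being safe to call for an
 * attribute that was never actually created in sysfs, which is what
 * makes the blanket iteration below possible.)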
*/ static void srp_del_scsi_host_attr(struct Scsi_Host *shost) { const struct attribute_group **g; struct attribute **attr; for (g = shost->hostt->shost_groups; *g; ++g) { for (attr = (*g)->attrs; *attr; ++attr) { struct device_attribute *dev_attr = container_of(*attr, typeof(*dev_attr), attr); device_remove_file(&shost->shost_dev, dev_attr); } } } static void srp_remove_target(struct srp_target_port *target) { struct srp_rdma_ch *ch; int i; WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); srp_del_scsi_host_attr(target->scsi_host); srp_rport_get(target->rport); srp_remove_host(target->scsi_host); scsi_remove_host(target->scsi_host); srp_stop_rport_timers(target->rport); srp_disconnect_target(target); kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; srp_free_ch_ib(target, ch); } cancel_work_sync(&target->tl_err_work); srp_rport_put(target->rport); kfree(target->ch); target->ch = NULL; spin_lock(&target->srp_host->target_lock); list_del(&target->list); spin_unlock(&target->srp_host->target_lock); scsi_host_put(target->scsi_host); } static void srp_remove_work(struct work_struct *work) { struct srp_target_port *target = container_of(work, struct srp_target_port, remove_work); WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); srp_remove_target(target); } static void srp_rport_delete(struct srp_rport *rport) { struct srp_target_port *target = rport->lld_data; srp_queue_remove_work(target); } /** * srp_connected_ch() - number of connected channels * @target: SRP target port. */ static int srp_connected_ch(struct srp_target_port *target) { int i, c = 0; for (i = 0; i < target->ch_count; i++) c += target->ch[i].connected; return c; } static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len, bool multich) { struct srp_target_port *target = ch->target; int ret; WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0); ret = srp_lookup_path(ch); if (ret) goto out; while (1) { init_completion(&ch->done); ret = srp_send_req(ch, max_iu_len, multich); if (ret) goto out; ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) goto out; /* * The CM event handling code will set status to * SRP_PORT_REDIRECT if we get a port redirect REJ * back, or SRP_DLID_REDIRECT if we get a lid/qp * redirect REJ back. */ ret = ch->status; switch (ret) { case 0: ch->connected = true; goto out; case SRP_PORT_REDIRECT: ret = srp_lookup_path(ch); if (ret) goto out; break; case SRP_DLID_REDIRECT: break; case SRP_STALE_CONN: shost_printk(KERN_ERR, target->scsi_host, PFX "giving up on stale connection\n"); ret = -ECONNRESET; goto out; default: goto out; } } out: return ret <= 0 ? 
ret : -ENODEV; } static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc) { srp_handle_qp_err(cq, wc, "INV RKEY"); } static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch, u32 rkey) { struct ib_send_wr wr = { .opcode = IB_WR_LOCAL_INV, .next = NULL, .num_sge = 0, .send_flags = 0, .ex.invalidate_rkey = rkey, }; wr.wr_cqe = &req->reg_cqe; req->reg_cqe.done = srp_inv_rkey_err_done; return ib_post_send(ch->qp, &wr, NULL); } static void srp_unmap_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, struct srp_request *req) { struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_device *ibdev = dev->dev; int i, res; if (!scsi_sglist(scmnd) || (scmnd->sc_data_direction != DMA_TO_DEVICE && scmnd->sc_data_direction != DMA_FROM_DEVICE)) return; if (dev->use_fast_reg) { struct srp_fr_desc **pfr; for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey); if (res < 0) { shost_printk(KERN_ERR, target->scsi_host, PFX "Queueing INV WR for rkey %#x failed (%d)\n", (*pfr)->mr->rkey, res); queue_work(system_long_wq, &target->tl_err_work); } } if (req->nmdesc) srp_fr_pool_put(ch->fr_pool, req->fr_list, req->nmdesc); } ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd), scmnd->sc_data_direction); } /** * srp_claim_req - Take ownership of the scmnd associated with a request. * @ch: SRP RDMA channel. * @req: SRP request. * @sdev: If not NULL, only take ownership for this SCSI device. * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take * ownership of @req->scmnd if it equals @scmnd. * * Return value: * Either NULL or a pointer to the SCSI command the caller became owner of. */ static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, struct srp_request *req, struct scsi_device *sdev, struct scsi_cmnd *scmnd) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); if (req->scmnd && (!sdev || req->scmnd->device == sdev) && (!scmnd || req->scmnd == scmnd)) { scmnd = req->scmnd; req->scmnd = NULL; } else { scmnd = NULL; } spin_unlock_irqrestore(&ch->lock, flags); return scmnd; } /** * srp_free_req() - Unmap data and adjust ch->req_lim. * @ch: SRP RDMA channel. * @req: Request to be freed. * @scmnd: SCSI command associated with @req. * @req_lim_delta: Amount to be added to @target->req_lim. 
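 *
 * In the normal completion path @req_lim_delta is the req_lim_delta
 * field of the target's SRP_RSP, i.e. the flow-control credits the
 * target hands back along with the response (see srp_process_rsp()).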
*/ static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, struct scsi_cmnd *scmnd, s32 req_lim_delta) { unsigned long flags; srp_unmap_data(scmnd, ch, req); spin_lock_irqsave(&ch->lock, flags); ch->req_lim += req_lim_delta; spin_unlock_irqrestore(&ch->lock, flags); } static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, struct scsi_device *sdev, int result) { struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); if (scmnd) { srp_free_req(ch, req, scmnd, 0); scmnd->result = result; scsi_done(scmnd); } } struct srp_terminate_context { struct srp_target_port *srp_target; int scsi_result; }; static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr) { struct srp_terminate_context *context = context_ptr; struct srp_target_port *target = context->srp_target; u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd)); struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; struct srp_request *req = scsi_cmd_priv(scmnd); srp_finish_req(ch, req, NULL, context->scsi_result); return true; } static void srp_terminate_io(struct srp_rport *rport) { struct srp_target_port *target = rport->lld_data; struct srp_terminate_context context = { .srp_target = target, .scsi_result = DID_TRANSPORT_FAILFAST << 16 }; scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context); } /* Calculate maximum initiator to target information unit length. */ static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data, uint32_t max_it_iu_size) { uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN + sizeof(struct srp_indirect_buf) + cmd_sg_cnt * sizeof(struct srp_direct_buf); if (use_imm_data) max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET + srp_max_imm_data); if (max_it_iu_size) max_iu_len = min(max_iu_len, max_it_iu_size); pr_debug("max_iu_len = %d\n", max_iu_len); return max_iu_len; } /* * It is up to the caller to ensure that srp_rport_reconnect() calls are * serialized and that no concurrent srp_queuecommand(), srp_abort(), * srp_reset_device() or srp_reset_host() calls will occur while this function * is in progress. One way to realize that is not to call this function * directly but to call srp_reconnect_rport() instead since that last function * serializes calls of this function via rport->mutex and also blocks * srp_queuecommand() calls before invoking this function. */ static int srp_rport_reconnect(struct srp_rport *rport) { struct srp_target_port *target = rport->lld_data; struct srp_rdma_ch *ch; uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data, target->max_it_iu_size); int i, j, ret = 0; bool multich = false; srp_disconnect_target(target); if (target->state == SRP_TARGET_SCANNING) return -ENODEV; /* * Now get a new local CM ID so that we avoid confusing the target in * case things are really fouled up. Doing so also ensures that all CM * callbacks will have finished before a new QP is allocated. */ for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; ret += srp_new_cm_id(ch); } { struct srp_terminate_context context = { .srp_target = target, .scsi_result = DID_RESET << 16}; scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context); } for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; /* * Whether or not creating a new CM ID succeeded, create a new * QP. This guarantees that all completion callback function * invocations have finished before request resetting starts. 
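 *
 * Failures are accumulated in 'ret' across all channels instead of
 * aborting on the first error; the connect loop further down only
 * calls srp_connect_ch() while 'ret' is still zero.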
*/ ret += srp_create_ch_ib(ch); INIT_LIST_HEAD(&ch->free_tx); for (j = 0; j < target->queue_size; ++j) list_add(&ch->tx_ring[j]->list, &ch->free_tx); } target->qp_in_error = false; for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; if (ret) break; ret = srp_connect_ch(ch, max_iu_len, multich); multich = true; } if (ret == 0) shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n"); return ret; } static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, unsigned int dma_len, u32 rkey) { struct srp_direct_buf *desc = state->desc; WARN_ON_ONCE(!dma_len); desc->va = cpu_to_be64(dma_addr); desc->key = cpu_to_be32(rkey); desc->len = cpu_to_be32(dma_len); state->total_len += dma_len; state->desc++; state->ndesc++; } static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc) { srp_handle_qp_err(cq, wc, "FAST REG"); } /* * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset * where to start in the first element. If sg_offset_p != NULL then * *sg_offset_p is updated to the offset in state->sg[retval] of the first * byte that has not yet been mapped. */ static int srp_map_finish_fr(struct srp_map_state *state, struct srp_request *req, struct srp_rdma_ch *ch, int sg_nents, unsigned int *sg_offset_p) { struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_reg_wr wr; struct srp_fr_desc *desc; u32 rkey; int n, err; if (state->fr.next >= state->fr.end) { shost_printk(KERN_ERR, ch->target->scsi_host, PFX "Out of MRs (mr_per_cmd = %d)\n", ch->target->mr_per_cmd); return -ENOMEM; } WARN_ON_ONCE(!dev->use_fast_reg); if (sg_nents == 1 && target->global_rkey) { unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; srp_map_desc(state, sg_dma_address(state->sg) + sg_offset, sg_dma_len(state->sg) - sg_offset, target->global_rkey); if (sg_offset_p) *sg_offset_p = 0; return 1; } desc = srp_fr_pool_get(ch->fr_pool); if (!desc) return -ENOMEM; rkey = ib_inc_rkey(desc->mr->rkey); ib_update_fast_reg_key(desc->mr, rkey); n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p, dev->mr_page_size); if (unlikely(n < 0)) { srp_fr_pool_put(ch->fr_pool, &desc, 1); pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n", dev_name(&req->scmnd->device->sdev_gendev), sg_nents, sg_offset_p ? 
*sg_offset_p : -1, n); return n; } WARN_ON_ONCE(desc->mr->length == 0); req->reg_cqe.done = srp_reg_mr_err_done; wr.wr.next = NULL; wr.wr.opcode = IB_WR_REG_MR; wr.wr.wr_cqe = &req->reg_cqe; wr.wr.num_sge = 0; wr.wr.send_flags = 0; wr.mr = desc->mr; wr.key = desc->mr->rkey; wr.access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE); *state->fr.next++ = desc; state->nmdesc++; srp_map_desc(state, desc->mr->iova, desc->mr->length, desc->mr->rkey); err = ib_post_send(ch->qp, &wr.wr, NULL); if (unlikely(err)) { WARN_ON_ONCE(err == -ENOMEM); return err; } return n; } static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count) { unsigned int sg_offset = 0; state->fr.next = req->fr_list; state->fr.end = req->fr_list + ch->target->mr_per_cmd; state->sg = scat; if (count == 0) return 0; while (count) { int i, n; n = srp_map_finish_fr(state, req, ch, count, &sg_offset); if (unlikely(n < 0)) return n; count -= n; for (i = 0; i < n; i++) state->sg = sg_next(state->sg); } return 0; } static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count) { struct srp_target_port *target = ch->target; struct scatterlist *sg; int i; for_each_sg(scat, sg, count, i) { srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg), target->global_rkey); } return 0; } /* * Register the indirect data buffer descriptor with the HCA. * * Note: since the indirect data buffer descriptor has been allocated with * kmalloc() it is guaranteed that this buffer is a physically contiguous * memory buffer. */ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, void **next_mr, void **end_mr, u32 idb_len, __be32 *idb_rkey) { struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct srp_map_state state; struct srp_direct_buf idb_desc; struct scatterlist idb_sg[1]; int ret; memset(&state, 0, sizeof(state)); memset(&idb_desc, 0, sizeof(idb_desc)); state.gen.next = next_mr; state.gen.end = end_mr; state.desc = &idb_desc; state.base_dma_addr = req->indirect_dma_addr; state.dma_len = idb_len; if (dev->use_fast_reg) { state.sg = idb_sg; sg_init_one(idb_sg, req->indirect_desc, idb_len); idb_sg->dma_address = req->indirect_dma_addr; /* hack! 
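 *
 * The single-entry scatterlist is patched by hand instead of going
 * through ib_dma_map_sg(): the indirect descriptor buffer was already
 * mapped with ib_dma_map_single() in srp_init_cmd_priv(), so its DMA
 * address can simply be reused here.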
*/ #ifdef CONFIG_NEED_SG_DMA_LENGTH idb_sg->dma_length = idb_sg->length; /* hack^2 */ #endif ret = srp_map_finish_fr(&state, req, ch, 1, NULL); if (ret < 0) return ret; WARN_ON_ONCE(ret < 1); } else { return -EINVAL; } *idb_rkey = idb_desc.key; return 0; } static void srp_check_mapping(struct srp_map_state *state, struct srp_rdma_ch *ch, struct srp_request *req, struct scatterlist *scat, int count) { struct srp_device *dev = ch->target->srp_host->srp_dev; struct srp_fr_desc **pfr; u64 desc_len = 0, mr_len = 0; int i; for (i = 0; i < state->ndesc; i++) desc_len += be32_to_cpu(req->indirect_desc[i].len); if (dev->use_fast_reg) for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++) mr_len += (*pfr)->mr->length; if (desc_len != scsi_bufflen(req->scmnd) || mr_len > scsi_bufflen(req->scmnd)) pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n", scsi_bufflen(req->scmnd), desc_len, mr_len, state->ndesc, state->nmdesc); } /** * srp_map_data() - map SCSI data buffer onto an SRP request * @scmnd: SCSI command to map * @ch: SRP RDMA channel * @req: SRP request * * Returns the length in bytes of the SRP_CMD IU or a negative value if * mapping failed. The size of any immediate data is not included in the * return value. */ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, struct srp_request *req) { struct srp_target_port *target = ch->target; struct scatterlist *scat, *sg; struct srp_cmd *cmd = req->cmd->buf; int i, len, nents, count, ret; struct srp_device *dev; struct ib_device *ibdev; struct srp_map_state state; struct srp_indirect_buf *indirect_hdr; u64 data_len; u32 idb_len, table_len; __be32 idb_rkey; u8 fmt; req->cmd->num_sge = 1; if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) return sizeof(struct srp_cmd) + cmd->add_cdb_len; if (scmnd->sc_data_direction != DMA_FROM_DEVICE && scmnd->sc_data_direction != DMA_TO_DEVICE) { shost_printk(KERN_WARNING, target->scsi_host, PFX "Unhandled data direction %d\n", scmnd->sc_data_direction); return -EINVAL; } nents = scsi_sg_count(scmnd); scat = scsi_sglist(scmnd); data_len = scsi_bufflen(scmnd); dev = target->srp_host->srp_dev; ibdev = dev->dev; count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); if (unlikely(count == 0)) return -EIO; if (ch->use_imm_data && count <= ch->max_imm_sge && SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len && scmnd->sc_data_direction == DMA_TO_DEVICE) { struct srp_imm_buf *buf; struct ib_sge *sge = &req->cmd->sge[1]; fmt = SRP_DATA_DESC_IMM; len = SRP_IMM_DATA_OFFSET; req->nmdesc = 0; buf = (void *)cmd->add_data + cmd->add_cdb_len; buf->len = cpu_to_be32(data_len); WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len); for_each_sg(scat, sg, count, i) { sge[i].addr = sg_dma_address(sg); sge[i].length = sg_dma_len(sg); sge[i].lkey = target->lkey; } req->cmd->num_sge += count; goto map_complete; } fmt = SRP_DATA_DESC_DIRECT; len = sizeof(struct srp_cmd) + cmd->add_cdb_len + sizeof(struct srp_direct_buf); if (count == 1 && target->global_rkey) { /* * The midlayer only generated a single gather/scatter * entry, or DMA mapping coalesced everything to a * single entry. So a direct descriptor along with * the DMA MR suffices. 
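 *
 * ("Coalesced" refers to the DMA layer: with an IOMMU, physically
 * discontiguous segments may be merged into one contiguous bus-address
 * range, so ib_dma_map_sg() can return fewer entries than it was
 * given.)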
*/ struct srp_direct_buf *buf; buf = (void *)cmd->add_data + cmd->add_cdb_len; buf->va = cpu_to_be64(sg_dma_address(scat)); buf->key = cpu_to_be32(target->global_rkey); buf->len = cpu_to_be32(sg_dma_len(scat)); req->nmdesc = 0; goto map_complete; } /* * We have more than one scatter/gather entry, so build our indirect * descriptor table, trying to merge as many entries as we can. */ indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len; ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, target->indirect_size, DMA_TO_DEVICE); memset(&state, 0, sizeof(state)); state.desc = req->indirect_desc; if (dev->use_fast_reg) ret = srp_map_sg_fr(&state, ch, req, scat, count); else ret = srp_map_sg_dma(&state, ch, req, scat, count); req->nmdesc = state.nmdesc; if (ret < 0) goto unmap; { DEFINE_DYNAMIC_DEBUG_METADATA(ddm, "Memory mapping consistency check"); if (DYNAMIC_DEBUG_BRANCH(ddm)) srp_check_mapping(&state, ch, req, scat, count); } /* We've mapped the request, now pull as much of the indirect * descriptor table as we can into the command buffer. If this * target is not using an external indirect table, we are * guaranteed to fit into the command, as the SCSI layer won't * give us more S/G entries than we allow. */ if (state.ndesc == 1) { /* * Memory registration collapsed the sg-list into one entry, * so use a direct descriptor. */ struct srp_direct_buf *buf; buf = (void *)cmd->add_data + cmd->add_cdb_len; *buf = req->indirect_desc[0]; goto map_complete; } if (unlikely(target->cmd_sg_cnt < state.ndesc && !target->allow_ext_sg)) { shost_printk(KERN_ERR, target->scsi_host, "Could not fit S/G list into SRP_CMD\n"); ret = -EIO; goto unmap; } count = min(state.ndesc, target->cmd_sg_cnt); table_len = state.ndesc * sizeof (struct srp_direct_buf); idb_len = sizeof(struct srp_indirect_buf) + table_len; fmt = SRP_DATA_DESC_INDIRECT; len = sizeof(struct srp_cmd) + cmd->add_cdb_len + sizeof(struct srp_indirect_buf); len += count * sizeof (struct srp_direct_buf); memcpy(indirect_hdr->desc_list, req->indirect_desc, count * sizeof (struct srp_direct_buf)); if (!target->global_rkey) { ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, idb_len, &idb_rkey); if (ret < 0) goto unmap; req->nmdesc++; } else { idb_rkey = cpu_to_be32(target->global_rkey); } indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); indirect_hdr->table_desc.key = idb_rkey; indirect_hdr->table_desc.len = cpu_to_be32(table_len); indirect_hdr->len = cpu_to_be32(state.total_len); if (scmnd->sc_data_direction == DMA_TO_DEVICE) cmd->data_out_desc_cnt = count; else cmd->data_in_desc_cnt = count; ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, DMA_TO_DEVICE); map_complete: if (scmnd->sc_data_direction == DMA_TO_DEVICE) cmd->buf_fmt = fmt << 4; else cmd->buf_fmt = fmt; return len; unmap: srp_unmap_data(scmnd, ch, req); if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size) ret = -E2BIG; return ret; } /* * Return an IU and possible credit to the free pool */ static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, enum srp_iu_type iu_type) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); list_add(&iu->list, &ch->free_tx); if (iu_type != SRP_IU_RSP) ++ch->req_lim; spin_unlock_irqrestore(&ch->lock, flags); } /* * Must be called with ch->lock held to protect req_lim and free_tx. * If IU is not sent, it must be returned using srp_put_tx_iu(). 
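 *
 * Typical calling pattern (cf. srp_queuecommand() and
 * srp_response_common() below):
 *
 *	spin_lock_irqsave(&ch->lock, flags);
 *	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
 *	spin_unlock_irqrestore(&ch->lock, flags);
 *	if (!iu)
 *		... bail out: no free IU, or no credits left ...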
* * Note: * An upper limit for the number of allocated information units for each * request type is: * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues * more than Scsi_Host.can_queue requests. * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than * one unanswered SRP request to an initiator. */ static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, enum srp_iu_type iu_type) { struct srp_target_port *target = ch->target; s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; struct srp_iu *iu; lockdep_assert_held(&ch->lock); ib_process_cq_direct(ch->send_cq, -1); if (list_empty(&ch->free_tx)) return NULL; /* Initiator responses to target requests do not consume credits */ if (iu_type != SRP_IU_RSP) { if (ch->req_lim <= rsv) { ++target->zero_req_lim; return NULL; } --ch->req_lim; } iu = list_first_entry(&ch->free_tx, struct srp_iu, list); list_del(&iu->list); return iu; } /* * Note: if this function is called from inside ib_drain_sq() then it will * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE * with status IB_WC_SUCCESS then that's a bug. */ static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc) { struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); struct srp_rdma_ch *ch = cq->cq_context; if (unlikely(wc->status != IB_WC_SUCCESS)) { srp_handle_qp_err(cq, wc, "SEND"); return; } lockdep_assert_held(&ch->lock); list_add(&iu->list, &ch->free_tx); } /** * srp_post_send() - send an SRP information unit * @ch: RDMA channel over which to send the information unit. * @iu: Information unit to send. * @len: Length of the information unit excluding immediate data. */ static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) { struct srp_target_port *target = ch->target; struct ib_send_wr wr; if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE)) return -EINVAL; iu->sge[0].addr = iu->dma; iu->sge[0].length = len; iu->sge[0].lkey = target->lkey; iu->cqe.done = srp_send_done; wr.next = NULL; wr.wr_cqe = &iu->cqe; wr.sg_list = &iu->sge[0]; wr.num_sge = iu->num_sge; wr.opcode = IB_WR_SEND; wr.send_flags = IB_SEND_SIGNALED; return ib_post_send(ch->qp, &wr, NULL); } static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) { struct srp_target_port *target = ch->target; struct ib_recv_wr wr; struct ib_sge list; list.addr = iu->dma; list.length = iu->size; list.lkey = target->lkey; iu->cqe.done = srp_recv_done; wr.next = NULL; wr.wr_cqe = &iu->cqe; wr.sg_list = &list; wr.num_sge = 1; return ib_post_recv(ch->qp, &wr, NULL); } static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) { struct srp_target_port *target = ch->target; struct srp_request *req; struct scsi_cmnd *scmnd; unsigned long flags; if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { spin_lock_irqsave(&ch->lock, flags); ch->req_lim += be32_to_cpu(rsp->req_lim_delta); if (rsp->tag == ch->tsk_mgmt_tag) { ch->tsk_mgmt_status = -1; if (be32_to_cpu(rsp->resp_data_len) >= 4) ch->tsk_mgmt_status = rsp->data[3]; complete(&ch->tsk_mgmt_done); } else { shost_printk(KERN_ERR, target->scsi_host, "Received tsk mgmt response too late for tag %#llx\n", rsp->tag); } spin_unlock_irqrestore(&ch->lock, flags); } else { scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); if (scmnd) { req = scsi_cmd_priv(scmnd); scmnd = srp_claim_req(ch, req, NULL, scmnd); } if (!scmnd) { shost_printk(KERN_ERR, target->scsi_host, "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", rsp->tag, ch 
- target->ch, ch->qp->qp_num); spin_lock_irqsave(&ch->lock, flags); ch->req_lim += be32_to_cpu(rsp->req_lim_delta); spin_unlock_irqrestore(&ch->lock, flags); return; } scmnd->result = rsp->status; if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { memcpy(scmnd->sense_buffer, rsp->data + be32_to_cpu(rsp->resp_data_len), min_t(int, be32_to_cpu(rsp->sense_data_len), SCSI_SENSE_BUFFERSIZE)); } if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); srp_free_req(ch, req, scmnd, be32_to_cpu(rsp->req_lim_delta)); scsi_done(scmnd); } } static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, void *rsp, int len) { struct srp_target_port *target = ch->target; struct ib_device *dev = target->srp_host->srp_dev->dev; unsigned long flags; struct srp_iu *iu; int err; spin_lock_irqsave(&ch->lock, flags); ch->req_lim += req_delta; iu = __srp_get_tx_iu(ch, SRP_IU_RSP); spin_unlock_irqrestore(&ch->lock, flags); if (!iu) { shost_printk(KERN_ERR, target->scsi_host, PFX "no IU available to send response\n"); return 1; } iu->num_sge = 1; ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); memcpy(iu->buf, rsp, len); ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); err = srp_post_send(ch, iu, len); if (err) { shost_printk(KERN_ERR, target->scsi_host, PFX "unable to post response: %d\n", err); srp_put_tx_iu(ch, iu, SRP_IU_RSP); } return err; } static void srp_process_cred_req(struct srp_rdma_ch *ch, struct srp_cred_req *req) { struct srp_cred_rsp rsp = { .opcode = SRP_CRED_RSP, .tag = req->tag, }; s32 delta = be32_to_cpu(req->req_lim_delta); if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) shost_printk(KERN_ERR, ch->target->scsi_host, PFX "problems processing SRP_CRED_REQ\n"); } static void srp_process_aer_req(struct srp_rdma_ch *ch, struct srp_aer_req *req) { struct srp_target_port *target = ch->target; struct srp_aer_rsp rsp = { .opcode = SRP_AER_RSP, .tag = req->tag, }; s32 delta = be32_to_cpu(req->req_lim_delta); shost_printk(KERN_ERR, target->scsi_host, PFX "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) shost_printk(KERN_ERR, target->scsi_host, PFX "problems processing SRP_AER_REQ\n"); } static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); struct srp_rdma_ch *ch = cq->cq_context; struct srp_target_port *target = ch->target; struct ib_device *dev = target->srp_host->srp_dev->dev; int res; u8 opcode; if (unlikely(wc->status != IB_WC_SUCCESS)) { srp_handle_qp_err(cq, wc, "RECV"); return; } ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, DMA_FROM_DEVICE); opcode = *(u8 *) iu->buf; if (0) { shost_printk(KERN_ERR, target->scsi_host, PFX "recv completion, opcode 0x%02x\n", opcode); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, iu->buf, wc->byte_len, true); } switch (opcode) { case SRP_RSP: srp_process_rsp(ch, iu->buf); break; case SRP_CRED_REQ: srp_process_cred_req(ch, iu->buf); break; case SRP_AER_REQ: srp_process_aer_req(ch, iu->buf); break; case SRP_T_LOGOUT: /* XXX Handle target logout */ shost_printk(KERN_WARNING, target->scsi_host, PFX "Got target logout request\n"); break; default: shost_printk(KERN_WARNING, target->scsi_host, PFX "Unhandled SRP opcode 0x%02x\n", opcode); break; } ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, 
DMA_FROM_DEVICE); res = srp_post_recv(ch, iu); if (res != 0) shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed with error code %d\n", res); } /** * srp_tl_err_work() - handle a transport layer error * @work: Work structure embedded in an SRP target port. * * Note: This function may get invoked before the rport has been created, * hence the target->rport test. */ static void srp_tl_err_work(struct work_struct *work) { struct srp_target_port *target; target = container_of(work, struct srp_target_port, tl_err_work); if (target->rport) srp_start_tl_fail_timers(target->rport); } static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc, const char *opname) { struct srp_rdma_ch *ch = cq->cq_context; struct srp_target_port *target = ch->target; if (ch->connected && !target->qp_in_error) { shost_printk(KERN_ERR, target->scsi_host, PFX "failed %s status %s (%d) for CQE %p\n", opname, ib_wc_status_msg(wc->status), wc->status, wc->wr_cqe); queue_work(system_long_wq, &target->tl_err_work); } target->qp_in_error = true; } static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) { struct request *rq = scsi_cmd_to_rq(scmnd); struct srp_target_port *target = host_to_target(shost); struct srp_rdma_ch *ch; struct srp_request *req = scsi_cmd_priv(scmnd); struct srp_iu *iu; struct srp_cmd *cmd; struct ib_device *dev; unsigned long flags; u32 tag; int len, ret; scmnd->result = srp_chkready(target->rport); if (unlikely(scmnd->result)) goto err; WARN_ON_ONCE(rq->tag < 0); tag = blk_mq_unique_tag(rq); ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; spin_lock_irqsave(&ch->lock, flags); iu = __srp_get_tx_iu(ch, SRP_IU_CMD); spin_unlock_irqrestore(&ch->lock, flags); if (!iu) goto err; dev = target->srp_host->srp_dev->dev; ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len, DMA_TO_DEVICE); cmd = iu->buf; memset(cmd, 0, sizeof *cmd); cmd->opcode = SRP_CMD; int_to_scsilun(scmnd->device->lun, &cmd->lun); cmd->tag = tag; memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) { cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb), 4); if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN)) goto err_iu; } req->scmnd = scmnd; req->cmd = iu; len = srp_map_data(scmnd, ch, req); if (len < 0) { shost_printk(KERN_ERR, target->scsi_host, PFX "Failed to map data (%d)\n", len); /* * If we ran out of memory descriptors (-ENOMEM) because an * application is queuing many requests with more than * max_pages_per_mr sg-list elements, tell the SCSI mid-layer * to reduce queue depth temporarily. */ scmnd->result = len == -ENOMEM ? DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16; goto err_iu; } ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len, DMA_TO_DEVICE); if (srp_post_send(ch, iu, len)) { shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); scmnd->result = DID_ERROR << 16; goto err_unmap; } return 0; err_unmap: srp_unmap_data(scmnd, ch, req); err_iu: srp_put_tx_iu(ch, iu, SRP_IU_CMD); /* * Avoid that the loops that iterate over the request ring can * encounter a dangling SCSI command pointer. */ req->scmnd = NULL; err: if (scmnd->result) { scsi_done(scmnd); ret = 0; } else { ret = SCSI_MLQUEUE_HOST_BUSY; } return ret; } /* * Note: the resources allocated in this function are freed in * srp_free_ch_ib(). 
*/ static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; int i; ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), GFP_KERNEL); if (!ch->rx_ring) goto err_no_ring; ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), GFP_KERNEL); if (!ch->tx_ring) goto err_no_ring; for (i = 0; i < target->queue_size; ++i) { ch->rx_ring[i] = srp_alloc_iu(target->srp_host, ch->max_ti_iu_len, GFP_KERNEL, DMA_FROM_DEVICE); if (!ch->rx_ring[i]) goto err; } for (i = 0; i < target->queue_size; ++i) { ch->tx_ring[i] = srp_alloc_iu(target->srp_host, ch->max_it_iu_len, GFP_KERNEL, DMA_TO_DEVICE); if (!ch->tx_ring[i]) goto err; list_add(&ch->tx_ring[i]->list, &ch->free_tx); } return 0; err: for (i = 0; i < target->queue_size; ++i) { srp_free_iu(target->srp_host, ch->rx_ring[i]); srp_free_iu(target->srp_host, ch->tx_ring[i]); } err_no_ring: kfree(ch->tx_ring); ch->tx_ring = NULL; kfree(ch->rx_ring); ch->rx_ring = NULL; return -ENOMEM; } static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) { uint64_t T_tr_ns, max_compl_time_ms; uint32_t rq_tmo_jiffies; /* * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, * table 91), both the QP timeout and the retry count have to be set * for RC QP's during the RTR to RTS transition. */ WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) != (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)); /* * Set target->rq_tmo_jiffies to one second more than the largest time * it can take before an error completion is generated. See also * C9-140..142 in the IBTA spec for more information about how to * convert the QP Local ACK Timeout value to nanoseconds. */ T_tr_ns = 4096 * (1ULL << qp_attr->timeout); max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; do_div(max_compl_time_ms, NSEC_PER_MSEC); rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000); return rq_tmo_jiffies; } static void srp_cm_rep_handler(struct ib_cm_id *cm_id, const struct srp_login_rsp *lrsp, struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; struct ib_qp_attr *qp_attr = NULL; int attr_mask = 0; int ret = 0; int i; if (lrsp->opcode == SRP_LOGIN_RSP) { ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); ch->use_imm_data = srp_use_imm_data && (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP); ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, ch->use_imm_data, target->max_it_iu_size); WARN_ON_ONCE(ch->max_it_iu_len > be32_to_cpu(lrsp->max_it_iu_len)); if (ch->use_imm_data) shost_printk(KERN_DEBUG, target->scsi_host, PFX "using immediate data\n"); /* * Reserve credits for task management so we don't * bounce requests back to the SCSI mid-layer. 
*/ target->scsi_host->can_queue = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, target->scsi_host->can_queue); target->scsi_host->cmd_per_lun = min_t(int, target->scsi_host->can_queue, target->scsi_host->cmd_per_lun); } else { shost_printk(KERN_WARNING, target->scsi_host, PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); ret = -ECONNRESET; goto error; } if (!ch->rx_ring) { ret = srp_alloc_iu_bufs(ch); if (ret) goto error; } for (i = 0; i < target->queue_size; i++) { struct srp_iu *iu = ch->rx_ring[i]; ret = srp_post_recv(ch, iu); if (ret) goto error; } if (!target->using_rdma_cm) { ret = -ENOMEM; qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL); if (!qp_attr) goto error; qp_attr->qp_state = IB_QPS_RTR; ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); if (ret) goto error_free; ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); if (ret) goto error_free; qp_attr->qp_state = IB_QPS_RTS; ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); if (ret) goto error_free; target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); if (ret) goto error_free; ret = ib_send_cm_rtu(cm_id, NULL, 0); } error_free: kfree(qp_attr); error: ch->status = ret; } static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *event, struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; struct Scsi_Host *shost = target->scsi_host; struct ib_class_port_info *cpi; int opcode; u16 dlid; switch (event->param.rej_rcvd.reason) { case IB_CM_REJ_PORT_CM_REDIRECT: cpi = event->param.rej_rcvd.ari; dlid = be16_to_cpu(cpi->redirect_lid); sa_path_set_dlid(&ch->ib_cm.path, dlid); ch->ib_cm.path.pkey = cpi->redirect_pkey; cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16); ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; break; case IB_CM_REJ_PORT_REDIRECT: if (srp_target_is_topspin(target)) { union ib_gid *dgid = &ch->ib_cm.path.dgid; /* * Topspin/Cisco SRP gateways incorrectly send * reject reason code 25 when they mean 24 * (port redirect). 
*/ memcpy(dgid->raw, event->param.rej_rcvd.ari, 16); shost_printk(KERN_DEBUG, shost, PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", be64_to_cpu(dgid->global.subnet_prefix), be64_to_cpu(dgid->global.interface_id)); ch->status = SRP_PORT_REDIRECT; } else { shost_printk(KERN_WARNING, shost, " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); ch->status = -ECONNRESET; } break; case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: shost_printk(KERN_WARNING, shost, " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); ch->status = -ECONNRESET; break; case IB_CM_REJ_CONSUMER_DEFINED: opcode = *(u8 *) event->private_data; if (opcode == SRP_LOGIN_REJ) { struct srp_login_rej *rej = event->private_data; u32 reason = be32_to_cpu(rej->reason); if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) shost_printk(KERN_WARNING, shost, PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); else shost_printk(KERN_WARNING, shost, PFX "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", target->sgid.raw, target->ib_cm.orig_dgid.raw, reason); } else shost_printk(KERN_WARNING, shost, " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," " opcode 0x%02x\n", opcode); ch->status = -ECONNRESET; break; case IB_CM_REJ_STALE_CONN: shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); ch->status = SRP_STALE_CONN; break; default: shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", event->param.rej_rcvd.reason); ch->status = -ECONNRESET; } } static int srp_ib_cm_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *event) { struct srp_rdma_ch *ch = cm_id->context; struct srp_target_port *target = ch->target; int comp = 0; switch (event->event) { case IB_CM_REQ_ERROR: shost_printk(KERN_DEBUG, target->scsi_host, PFX "Sending CM REQ failed\n"); comp = 1; ch->status = -ECONNRESET; break; case IB_CM_REP_RECEIVED: comp = 1; srp_cm_rep_handler(cm_id, event->private_data, ch); break; case IB_CM_REJ_RECEIVED: shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); comp = 1; srp_ib_cm_rej_handler(cm_id, event, ch); break; case IB_CM_DREQ_RECEIVED: shost_printk(KERN_WARNING, target->scsi_host, PFX "DREQ received - connection closed\n"); ch->connected = false; if (ib_send_cm_drep(cm_id, NULL, 0)) shost_printk(KERN_ERR, target->scsi_host, PFX "Sending CM DREP failed\n"); queue_work(system_long_wq, &target->tl_err_work); break; case IB_CM_TIMEWAIT_EXIT: shost_printk(KERN_ERR, target->scsi_host, PFX "connection closed\n"); comp = 1; ch->status = 0; break; case IB_CM_MRA_RECEIVED: case IB_CM_DREQ_ERROR: case IB_CM_DREP_RECEIVED: break; default: shost_printk(KERN_WARNING, target->scsi_host, PFX "Unhandled CM event %d\n", event->event); break; } if (comp) complete(&ch->done); return 0; } static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch, struct rdma_cm_event *event) { struct srp_target_port *target = ch->target; struct Scsi_Host *shost = target->scsi_host; int opcode; switch (event->status) { case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: shost_printk(KERN_WARNING, shost, " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); ch->status = -ECONNRESET; break; case IB_CM_REJ_CONSUMER_DEFINED: opcode = *(u8 *) event->param.conn.private_data; if (opcode == SRP_LOGIN_REJ) { struct srp_login_rej *rej = (struct srp_login_rej *) event->param.conn.private_data; u32 reason = be32_to_cpu(rej->reason); if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) shost_printk(KERN_WARNING, shost, PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); else shost_printk(KERN_WARNING, shost, PFX "SRP LOGIN REJECTED, reason 
0x%08x\n", reason); } else { shost_printk(KERN_WARNING, shost, " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n", opcode); } ch->status = -ECONNRESET; break; case IB_CM_REJ_STALE_CONN: shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); ch->status = SRP_STALE_CONN; break; default: shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", event->status); ch->status = -ECONNRESET; break; } } static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct srp_rdma_ch *ch = cm_id->context; struct srp_target_port *target = ch->target; int comp = 0; switch (event->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: ch->status = 0; comp = 1; break; case RDMA_CM_EVENT_ADDR_ERROR: ch->status = -ENXIO; comp = 1; break; case RDMA_CM_EVENT_ROUTE_RESOLVED: ch->status = 0; comp = 1; break; case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_UNREACHABLE: ch->status = -EHOSTUNREACH; comp = 1; break; case RDMA_CM_EVENT_CONNECT_ERROR: shost_printk(KERN_DEBUG, target->scsi_host, PFX "Sending CM REQ failed\n"); comp = 1; ch->status = -ECONNRESET; break; case RDMA_CM_EVENT_ESTABLISHED: comp = 1; srp_cm_rep_handler(NULL, event->param.conn.private_data, ch); break; case RDMA_CM_EVENT_REJECTED: shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); comp = 1; srp_rdma_cm_rej_handler(ch, event); break; case RDMA_CM_EVENT_DISCONNECTED: if (ch->connected) { shost_printk(KERN_WARNING, target->scsi_host, PFX "received DREQ\n"); rdma_disconnect(ch->rdma_cm.cm_id); comp = 1; ch->status = 0; queue_work(system_long_wq, &target->tl_err_work); } break; case RDMA_CM_EVENT_TIMEWAIT_EXIT: shost_printk(KERN_ERR, target->scsi_host, PFX "connection closed\n"); comp = 1; ch->status = 0; break; default: shost_printk(KERN_WARNING, target->scsi_host, PFX "Unhandled CM event %d\n", event->event); break; } if (comp) complete(&ch->done); return 0; } /** * srp_change_queue_depth - setting device queue depth * @sdev: scsi device struct * @qdepth: requested queue depth * * Returns queue depth. */ static int srp_change_queue_depth(struct scsi_device *sdev, int qdepth) { if (!sdev->tagged_supported) qdepth = 1; return scsi_change_queue_depth(sdev, qdepth); } static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, u8 func, u8 *status) { struct srp_target_port *target = ch->target; struct srp_rport *rport = target->rport; struct ib_device *dev = target->srp_host->srp_dev->dev; struct srp_iu *iu; struct srp_tsk_mgmt *tsk_mgmt; int res; if (!ch->connected || target->qp_in_error) return -1; /* * Lock the rport mutex to avoid that srp_create_ch_ib() is * invoked while a task management function is being sent. 
*/ mutex_lock(&rport->mutex); spin_lock_irq(&ch->lock); iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); spin_unlock_irq(&ch->lock); if (!iu) { mutex_unlock(&rport->mutex); return -1; } iu->num_sge = 1; ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, DMA_TO_DEVICE); tsk_mgmt = iu->buf; memset(tsk_mgmt, 0, sizeof *tsk_mgmt); tsk_mgmt->opcode = SRP_TSK_MGMT; int_to_scsilun(lun, &tsk_mgmt->lun); tsk_mgmt->tsk_mgmt_func = func; tsk_mgmt->task_tag = req_tag; spin_lock_irq(&ch->lock); ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT; tsk_mgmt->tag = ch->tsk_mgmt_tag; spin_unlock_irq(&ch->lock); init_completion(&ch->tsk_mgmt_done); ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, DMA_TO_DEVICE); if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); mutex_unlock(&rport->mutex); return -1; } res = wait_for_completion_timeout(&ch->tsk_mgmt_done, msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)); if (res > 0 && status) *status = ch->tsk_mgmt_status; mutex_unlock(&rport->mutex); WARN_ON_ONCE(res < 0); return res > 0 ? 0 : -1; } static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_request *req = scsi_cmd_priv(scmnd); u32 tag; u16 ch_idx; struct srp_rdma_ch *ch; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd)); ch_idx = blk_mq_unique_tag_to_hwq(tag); if (WARN_ON_ONCE(ch_idx >= target->ch_count)) return SUCCESS; ch = &target->ch[ch_idx]; if (!srp_claim_req(ch, req, NULL, scmnd)) return SUCCESS; shost_printk(KERN_ERR, target->scsi_host, "Sending SRP abort for tag %#x\n", tag); if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, SRP_TSK_ABORT_TASK, NULL) == 0) { srp_free_req(ch, req, scmnd, 0); return SUCCESS; } if (target->rport->state == SRP_RPORT_LOST) return FAST_IO_FAIL; return FAILED; } static int srp_reset_device(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_rdma_ch *ch; u8 status; shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); ch = &target->ch[0]; if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, SRP_TSK_LUN_RESET, &status)) return FAILED; if (status) return FAILED; return SUCCESS; } static int srp_reset_host(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); return srp_reconnect_rport(target->rport) == 0 ? 
SUCCESS : FAILED; } static int srp_target_alloc(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct srp_target_port *target = host_to_target(shost); if (target->target_can_queue) starget->can_queue = target->target_can_queue; return 0; } static int srp_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) { struct Scsi_Host *shost = sdev->host; struct srp_target_port *target = host_to_target(shost); struct request_queue *q = sdev->request_queue; unsigned long timeout; if (sdev->type == TYPE_DISK) { timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); blk_queue_rq_timeout(q, timeout); } return 0; } static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); } static DEVICE_ATTR_RO(id_ext); static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); } static DEVICE_ATTR_RO(ioc_guid); static ssize_t service_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); if (target->using_rdma_cm) return -ENOENT; return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ib_cm.service_id)); } static DEVICE_ATTR_RO(service_id); static ssize_t pkey_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); if (target->using_rdma_cm) return -ENOENT; return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey)); } static DEVICE_ATTR_RO(pkey); static ssize_t sgid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%pI6\n", target->sgid.raw); } static DEVICE_ATTR_RO(sgid); static ssize_t dgid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); struct srp_rdma_ch *ch = &target->ch[0]; if (target->using_rdma_cm) return -ENOENT; return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw); } static DEVICE_ATTR_RO(dgid); static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); if (target->using_rdma_cm) return -ENOENT; return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw); } static DEVICE_ATTR_RO(orig_dgid); static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); struct srp_rdma_ch *ch; int i, req_lim = INT_MAX; for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; req_lim = min(req_lim, ch->req_lim); } return sysfs_emit(buf, "%d\n", req_lim); } static DEVICE_ATTR_RO(req_lim); static ssize_t zero_req_lim_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%d\n", target->zero_req_lim); } static DEVICE_ATTR_RO(zero_req_lim); static ssize_t local_ib_port_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%u\n", 
target->srp_host->port); } static DEVICE_ATTR_RO(local_ib_port); static ssize_t local_ib_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%s\n", dev_name(&target->srp_host->srp_dev->dev->dev)); } static DEVICE_ATTR_RO(local_ib_device); static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%d\n", target->ch_count); } static DEVICE_ATTR_RO(ch_count); static ssize_t comp_vector_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%d\n", target->comp_vector); } static DEVICE_ATTR_RO(comp_vector); static ssize_t tl_retry_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%d\n", target->tl_retry_count); } static DEVICE_ATTR_RO(tl_retry_count); static ssize_t cmd_sg_entries_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt); } static DEVICE_ATTR_RO(cmd_sg_entries); static ssize_t allow_ext_sg_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); } static DEVICE_ATTR_RO(allow_ext_sg); static struct attribute *srp_host_attrs[] = { &dev_attr_id_ext.attr, &dev_attr_ioc_guid.attr, &dev_attr_service_id.attr, &dev_attr_pkey.attr, &dev_attr_sgid.attr, &dev_attr_dgid.attr, &dev_attr_orig_dgid.attr, &dev_attr_req_lim.attr, &dev_attr_zero_req_lim.attr, &dev_attr_local_ib_port.attr, &dev_attr_local_ib_device.attr, &dev_attr_ch_count.attr, &dev_attr_comp_vector.attr, &dev_attr_tl_retry_count.attr, &dev_attr_cmd_sg_entries.attr, &dev_attr_allow_ext_sg.attr, NULL }; ATTRIBUTE_GROUPS(srp_host); static const struct scsi_host_template srp_template = { .module = THIS_MODULE, .name = "InfiniBand SRP initiator", .proc_name = DRV_NAME, .target_alloc = srp_target_alloc, .sdev_configure = srp_sdev_configure, .info = srp_target_info, .init_cmd_priv = srp_init_cmd_priv, .exit_cmd_priv = srp_exit_cmd_priv, .queuecommand = srp_queuecommand, .change_queue_depth = srp_change_queue_depth, .eh_timed_out = srp_timed_out, .eh_abort_handler = srp_abort, .eh_device_reset_handler = srp_reset_device, .eh_host_reset_handler = srp_reset_host, .skip_settle_delay = true, .sg_tablesize = SRP_DEF_SG_TABLESIZE, .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, .this_id = -1, .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, .shost_groups = srp_host_groups, .track_queue_depth = 1, .cmd_size = sizeof(struct srp_request), }; static int srp_sdev_count(struct Scsi_Host *host) { struct scsi_device *sdev; int c = 0; shost_for_each_device(sdev, host) c++; return c; } /* * Return values: * < 0 upon failure. Caller is responsible for SRP target port cleanup. * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port * removal has been scheduled. * 0 and target->state != SRP_TARGET_REMOVED upon success. 
*/ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) { struct srp_rport_identifiers ids; struct srp_rport *rport; target->state = SRP_TARGET_SCANNING; sprintf(target->target_name, "SRP.T10:%016llX", be64_to_cpu(target->id_ext)); if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) return -ENODEV; memcpy(ids.port_id, &target->id_ext, 8); memcpy(ids.port_id + 8, &target->ioc_guid, 8); ids.roles = SRP_RPORT_ROLE_TARGET; rport = srp_rport_add(target->scsi_host, &ids); if (IS_ERR(rport)) { scsi_remove_host(target->scsi_host); return PTR_ERR(rport); } rport->lld_data = target; target->rport = rport; spin_lock(&host->target_lock); list_add_tail(&target->list, &host->target_list); spin_unlock(&host->target_lock); scsi_scan_target(&target->scsi_host->shost_gendev, 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); if (srp_connected_ch(target) < target->ch_count || target->qp_in_error) { shost_printk(KERN_INFO, target->scsi_host, PFX "SCSI scan failed - removing SCSI host\n"); srp_queue_remove_work(target); goto out; } pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n", dev_name(&target->scsi_host->shost_gendev), srp_sdev_count(target->scsi_host)); spin_lock_irq(&target->lock); if (target->state == SRP_TARGET_SCANNING) target->state = SRP_TARGET_LIVE; spin_unlock_irq(&target->lock); out: return 0; } static void srp_release_dev(struct device *dev) { struct srp_host *host = container_of(dev, struct srp_host, dev); kfree(host); } static struct attribute *srp_class_attrs[]; ATTRIBUTE_GROUPS(srp_class); static struct class srp_class = { .name = "infiniband_srp", .dev_groups = srp_class_groups, .dev_release = srp_release_dev }; /** * srp_conn_unique() - check whether the connection to a target is unique * @host: SRP host. * @target: SRP target port. */ static bool srp_conn_unique(struct srp_host *host, struct srp_target_port *target) { struct srp_target_port *t; bool ret = false; if (target->state == SRP_TARGET_REMOVED) goto out; ret = true; spin_lock(&host->target_lock); list_for_each_entry(t, &host->target_list, list) { if (t != target && target->id_ext == t->id_ext && target->ioc_guid == t->ioc_guid && target->initiator_ext == t->initiator_ext) { ret = false; break; } } spin_unlock(&host->target_lock); out: return ret; } /* * Target ports are added by writing * * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, * pkey=<P_Key>,service_id=<service ID> * or * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>, * [src=<IPv4 address>,]dest=<IPv4 address>:<port number> * * to the add_target sysfs attribute. 
*/ enum { SRP_OPT_ERR = 0, SRP_OPT_ID_EXT = 1 << 0, SRP_OPT_IOC_GUID = 1 << 1, SRP_OPT_DGID = 1 << 2, SRP_OPT_PKEY = 1 << 3, SRP_OPT_SERVICE_ID = 1 << 4, SRP_OPT_MAX_SECT = 1 << 5, SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, SRP_OPT_IO_CLASS = 1 << 7, SRP_OPT_INITIATOR_EXT = 1 << 8, SRP_OPT_CMD_SG_ENTRIES = 1 << 9, SRP_OPT_ALLOW_EXT_SG = 1 << 10, SRP_OPT_SG_TABLESIZE = 1 << 11, SRP_OPT_COMP_VECTOR = 1 << 12, SRP_OPT_TL_RETRY_COUNT = 1 << 13, SRP_OPT_QUEUE_SIZE = 1 << 14, SRP_OPT_IP_SRC = 1 << 15, SRP_OPT_IP_DEST = 1 << 16, SRP_OPT_TARGET_CAN_QUEUE= 1 << 17, SRP_OPT_MAX_IT_IU_SIZE = 1 << 18, SRP_OPT_CH_COUNT = 1 << 19, }; static unsigned int srp_opt_mandatory[] = { SRP_OPT_ID_EXT | SRP_OPT_IOC_GUID | SRP_OPT_DGID | SRP_OPT_PKEY | SRP_OPT_SERVICE_ID, SRP_OPT_ID_EXT | SRP_OPT_IOC_GUID | SRP_OPT_IP_DEST, }; static const match_table_t srp_opt_tokens = { { SRP_OPT_ID_EXT, "id_ext=%s" }, { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, { SRP_OPT_DGID, "dgid=%s" }, { SRP_OPT_PKEY, "pkey=%x" }, { SRP_OPT_SERVICE_ID, "service_id=%s" }, { SRP_OPT_MAX_SECT, "max_sect=%d" }, { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" }, { SRP_OPT_IO_CLASS, "io_class=%x" }, { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, { SRP_OPT_IP_SRC, "src=%s" }, { SRP_OPT_IP_DEST, "dest=%s" }, { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" }, { SRP_OPT_CH_COUNT, "ch_count=%u", }, { SRP_OPT_ERR, NULL } }; /** * srp_parse_in - parse an IP address and port number combination * @net: [in] Network namespace. * @sa: [out] Address family, IP address and port number. * @addr_port_str: [in] IP address and port number. * @has_port: [out] Whether or not @addr_port_str includes a port number. * * Parse the following address formats: * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5. * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5. 
*/ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa, const char *addr_port_str, bool *has_port) { char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL); char *port_str; int ret; if (!addr) return -ENOMEM; port_str = strrchr(addr, ':'); if (port_str && strchr(port_str, ']')) port_str = NULL; if (port_str) *port_str++ = '\0'; if (has_port) *has_port = port_str != NULL; ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa); if (ret && addr[0]) { addr_end = addr + strlen(addr) - 1; if (addr[0] == '[' && *addr_end == ']') { *addr_end = '\0'; ret = inet_pton_with_scope(net, AF_INET6, addr + 1, port_str, sa); } } kfree(addr); pr_debug("%s -> %pISpfsc\n", addr_port_str, sa); return ret; } static int srp_parse_options(struct net *net, const char *buf, struct srp_target_port *target) { char *options, *sep_opt; char *p; substring_t args[MAX_OPT_ARGS]; unsigned long long ull; bool has_port; int opt_mask = 0; int token; int ret = -EINVAL; int i; options = kstrdup(buf, GFP_KERNEL); if (!options) return -ENOMEM; sep_opt = options; while ((p = strsep(&sep_opt, ",\n")) != NULL) { if (!*p) continue; token = match_token(p, srp_opt_tokens, args); opt_mask |= token; switch (token) { case SRP_OPT_ID_EXT: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } ret = kstrtoull(p, 16, &ull); if (ret) { pr_warn("invalid id_ext parameter '%s'\n", p); kfree(p); goto out; } target->id_ext = cpu_to_be64(ull); kfree(p); break; case SRP_OPT_IOC_GUID: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } ret = kstrtoull(p, 16, &ull); if (ret) { pr_warn("invalid ioc_guid parameter '%s'\n", p); kfree(p); goto out; } target->ioc_guid = cpu_to_be64(ull); kfree(p); break; case SRP_OPT_DGID: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } if (strlen(p) != 32) { pr_warn("bad dest GID parameter '%s'\n", p); kfree(p); goto out; } ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16); kfree(p); if (ret < 0) goto out; break; case SRP_OPT_PKEY: ret = match_hex(args, &token); if (ret) { pr_warn("bad P_Key parameter '%s'\n", p); goto out; } target->ib_cm.pkey = cpu_to_be16(token); break; case SRP_OPT_SERVICE_ID: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } ret = kstrtoull(p, 16, &ull); if (ret) { pr_warn("bad service_id parameter '%s'\n", p); kfree(p); goto out; } target->ib_cm.service_id = cpu_to_be64(ull); kfree(p); break; case SRP_OPT_IP_SRC: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } ret = srp_parse_in(net, &target->rdma_cm.src.ss, p, NULL); if (ret < 0) { pr_warn("bad source parameter '%s'\n", p); kfree(p); goto out; } target->rdma_cm.src_specified = true; kfree(p); break; case SRP_OPT_IP_DEST: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p, &has_port); if (!has_port) ret = -EINVAL; if (ret < 0) { pr_warn("bad dest parameter '%s'\n", p); kfree(p); goto out; } target->using_rdma_cm = true; kfree(p); break; case SRP_OPT_MAX_SECT: ret = match_int(args, &token); if (ret) { pr_warn("bad max sect parameter '%s'\n", p); goto out; } target->scsi_host->max_sectors = token; break; case SRP_OPT_QUEUE_SIZE: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n", p, ret); goto out; } if (token < 1) { pr_warn("bad queue_size parameter '%s'\n", p); ret = -EINVAL; goto out; } target->scsi_host->can_queue = token; target->queue_size = token + SRP_RSP_SQ_SIZE + SRP_TSK_MGMT_SQ_SIZE; if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 
target->scsi_host->cmd_per_lun = token; break; case SRP_OPT_MAX_CMD_PER_LUN: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n", p, ret); goto out; } if (token < 1) { pr_warn("bad max cmd_per_lun parameter '%s'\n", p); ret = -EINVAL; goto out; } target->scsi_host->cmd_per_lun = token; break; case SRP_OPT_TARGET_CAN_QUEUE: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n", p, ret); goto out; } if (token < 1) { pr_warn("bad max target_can_queue parameter '%s'\n", p); ret = -EINVAL; goto out; } target->target_can_queue = token; break; case SRP_OPT_IO_CLASS: ret = match_hex(args, &token); if (ret) { pr_warn("bad IO class parameter '%s'\n", p); goto out; } if (token != SRP_REV10_IB_IO_CLASS && token != SRP_REV16A_IB_IO_CLASS) { pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS); ret = -EINVAL; goto out; } target->io_class = token; break; case SRP_OPT_INITIATOR_EXT: p = match_strdup(args); if (!p) { ret = -ENOMEM; goto out; } ret = kstrtoull(p, 16, &ull); if (ret) { pr_warn("bad initiator_ext value '%s'\n", p); kfree(p); goto out; } target->initiator_ext = cpu_to_be64(ull); kfree(p); break; case SRP_OPT_CMD_SG_ENTRIES: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n", p, ret); goto out; } if (token < 1 || token > 255) { pr_warn("bad max cmd_sg_entries parameter '%s'\n", p); ret = -EINVAL; goto out; } target->cmd_sg_cnt = token; break; case SRP_OPT_ALLOW_EXT_SG: ret = match_int(args, &token); if (ret) { pr_warn("bad allow_ext_sg parameter '%s'\n", p); goto out; } target->allow_ext_sg = !!token; break; case SRP_OPT_SG_TABLESIZE: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n", p, ret); goto out; } if (token < 1 || token > SG_MAX_SEGMENTS) { pr_warn("bad max sg_tablesize parameter '%s'\n", p); ret = -EINVAL; goto out; } target->sg_tablesize = token; break; case SRP_OPT_COMP_VECTOR: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n", p, ret); goto out; } if (token < 0) { pr_warn("bad comp_vector parameter '%s'\n", p); ret = -EINVAL; goto out; } target->comp_vector = token; break; case SRP_OPT_TL_RETRY_COUNT: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n", p, ret); goto out; } if (token < 2 || token > 7) { pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", p); ret = -EINVAL; goto out; } target->tl_retry_count = token; break; case SRP_OPT_MAX_IT_IU_SIZE: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n", p, ret); goto out; } if (token < 0) { pr_warn("bad maximum initiator to target IU size '%s'\n", p); ret = -EINVAL; goto out; } target->max_it_iu_size = token; break; case SRP_OPT_CH_COUNT: ret = match_int(args, &token); if (ret) { pr_warn("match_int() failed for channel count parameter '%s', Error %d\n", p, ret); goto out; } if (token < 1) { pr_warn("bad channel count %s\n", p); ret = -EINVAL; goto out; } target->ch_count = token; break; default: pr_warn("unknown parameter or missing value '%s' in target creation request\n", p); ret = -EINVAL; goto out; } } for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) { if 
((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) { ret = 0; break; } } if (ret) pr_warn("target creation request is missing one or more parameters\n"); if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) pr_warn("cmd_per_lun = %d > queue_size = %d\n", target->scsi_host->cmd_per_lun, target->scsi_host->can_queue); out: kfree(options); return ret; } static ssize_t add_target_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct srp_host *host = container_of(dev, struct srp_host, dev); struct Scsi_Host *target_host; struct srp_target_port *target; struct srp_rdma_ch *ch; struct srp_device *srp_dev = host->srp_dev; struct ib_device *ibdev = srp_dev->dev; int ret, i, ch_idx; unsigned int max_sectors_per_mr, mr_per_cmd = 0; bool multich = false; uint32_t max_iu_len; target_host = scsi_host_alloc(&srp_template, sizeof (struct srp_target_port)); if (!target_host) return -ENOMEM; target_host->transportt = ib_srp_transport_template; target_host->max_channel = 0; target_host->max_id = 1; target_host->max_lun = -1LL; target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; if (ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) target_host->max_segment_size = ib_dma_max_seg_size(ibdev); else target_host->virt_boundary_mask = ~srp_dev->mr_page_mask; target = host_to_target(target_host); target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); target->io_class = SRP_REV16A_IB_IO_CLASS; target->scsi_host = target_host; target->srp_host = host; target->lkey = host->srp_dev->pd->local_dma_lkey; target->global_rkey = host->srp_dev->global_rkey; target->cmd_sg_cnt = cmd_sg_entries; target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; target->allow_ext_sg = allow_ext_sg; target->tl_retry_count = 7; target->queue_size = SRP_DEFAULT_QUEUE_SIZE; /* * Avoid that the SCSI host can be removed by srp_remove_target() * before this function returns. */ scsi_host_get(target->scsi_host); ret = mutex_lock_interruptible(&host->add_target_mutex); if (ret < 0) goto put; ret = srp_parse_options(target->net, buf, target); if (ret) goto out; if (!srp_conn_unique(target->srp_host, target)) { if (target->using_rdma_cm) { shost_printk(KERN_INFO, target->scsi_host, PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n", be64_to_cpu(target->id_ext), be64_to_cpu(target->ioc_guid), &target->rdma_cm.dst); } else { shost_printk(KERN_INFO, target->scsi_host, PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", be64_to_cpu(target->id_ext), be64_to_cpu(target->ioc_guid), be64_to_cpu(target->initiator_ext)); } ret = -EEXIST; goto out; } if (!srp_dev->has_fr && !target->allow_ext_sg && target->cmd_sg_cnt < target->sg_tablesize) { pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); target->sg_tablesize = target->cmd_sg_cnt; } if (srp_dev->use_fast_reg) { bool gaps_reg = ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG; max_sectors_per_mr = srp_dev->max_pages_per_mr << (ilog2(srp_dev->mr_page_size) - 9); if (!gaps_reg) { /* * FR can only map one HCA page per entry. If the start * address is not aligned on a HCA page boundary two * entries will be used for the head and the tail * although these two entries combined contain at most * one HCA page of data. Hence the "+ 1" in the * calculation below. 
* * The indirect data buffer descriptor is contiguous * so the memory for that buffer will only be * registered if register_always is true. Hence add * one to mr_per_cmd if register_always has been set. */ mr_per_cmd = register_always + (target->scsi_host->max_sectors + 1 + max_sectors_per_mr - 1) / max_sectors_per_mr; } else { mr_per_cmd = register_always + (target->sg_tablesize + srp_dev->max_pages_per_mr - 1) / srp_dev->max_pages_per_mr; } pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n", target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size, max_sectors_per_mr, mr_per_cmd); } target_host->sg_tablesize = target->sg_tablesize; target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd; target->mr_per_cmd = mr_per_cmd; target->indirect_size = target->sg_tablesize * sizeof (struct srp_direct_buf); max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data, target->max_it_iu_size); INIT_WORK(&target->tl_err_work, srp_tl_err_work); INIT_WORK(&target->remove_work, srp_remove_work); spin_lock_init(&target->lock); ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid); if (ret) goto out; ret = -ENOMEM; if (target->ch_count == 0) { target->ch_count = min(ch_count ?: max(4 * num_online_nodes(), ibdev->num_comp_vectors), num_online_cpus()); } target->ch = kcalloc(target->ch_count, sizeof(*target->ch), GFP_KERNEL); if (!target->ch) goto out; for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) { ch = &target->ch[ch_idx]; ch->target = target; ch->comp_vector = ch_idx % ibdev->num_comp_vectors; spin_lock_init(&ch->lock); INIT_LIST_HEAD(&ch->free_tx); ret = srp_new_cm_id(ch); if (ret) goto err_disconnect; ret = srp_create_ch_ib(ch); if (ret) goto err_disconnect; ret = srp_connect_ch(ch, max_iu_len, multich); if (ret) { char dst[64]; if (target->using_rdma_cm) snprintf(dst, sizeof(dst), "%pIS", &target->rdma_cm.dst); else snprintf(dst, sizeof(dst), "%pI6", target->ib_cm.orig_dgid.raw); shost_printk(KERN_ERR, target->scsi_host, PFX "Connection %d/%d to %s failed\n", ch_idx, target->ch_count, dst); if (ch_idx == 0) { goto free_ch; } else { srp_free_ch_ib(target, ch); target->ch_count = ch - target->ch; goto connected; } } multich = true; } connected: target->scsi_host->nr_hw_queues = target->ch_count; ret = srp_add_target(host, target); if (ret) goto err_disconnect; if (target->state != SRP_TARGET_REMOVED) { if (target->using_rdma_cm) { shost_printk(KERN_DEBUG, target->scsi_host, PFX "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n", be64_to_cpu(target->id_ext), be64_to_cpu(target->ioc_guid), target->sgid.raw, &target->rdma_cm.dst); } else { shost_printk(KERN_DEBUG, target->scsi_host, PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", be64_to_cpu(target->id_ext), be64_to_cpu(target->ioc_guid), be16_to_cpu(target->ib_cm.pkey), be64_to_cpu(target->ib_cm.service_id), target->sgid.raw, target->ib_cm.orig_dgid.raw); } } ret = count; out: mutex_unlock(&host->add_target_mutex); put: scsi_host_put(target->scsi_host); if (ret < 0) { /* * If a call to srp_remove_target() has not been scheduled, * drop the network namespace reference now that was obtained * earlier in this function. 
*/ if (target->state != SRP_TARGET_REMOVED) kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); scsi_host_put(target->scsi_host); } return ret; err_disconnect: srp_disconnect_target(target); free_ch: for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; srp_free_ch_ib(target, ch); } kfree(target->ch); goto out; } static DEVICE_ATTR_WO(add_target); static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_host *host = container_of(dev, struct srp_host, dev); return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev)); } static DEVICE_ATTR_RO(ibdev); static ssize_t port_show(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_host *host = container_of(dev, struct srp_host, dev); return sysfs_emit(buf, "%u\n", host->port); } static DEVICE_ATTR_RO(port); static struct attribute *srp_class_attrs[] = { &dev_attr_add_target.attr, &dev_attr_ibdev.attr, &dev_attr_port.attr, NULL }; static struct srp_host *srp_add_port(struct srp_device *device, u32 port) { struct srp_host *host; host = kzalloc(sizeof *host, GFP_KERNEL); if (!host) return NULL; INIT_LIST_HEAD(&host->target_list); spin_lock_init(&host->target_lock); mutex_init(&host->add_target_mutex); host->srp_dev = device; host->port = port; device_initialize(&host->dev); host->dev.class = &srp_class; host->dev.parent = device->dev->dev.parent; if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev), port)) goto put_host; if (device_add(&host->dev)) goto put_host; return host; put_host: put_device(&host->dev); return NULL; } static void srp_rename_dev(struct ib_device *device, void *client_data) { struct srp_device *srp_dev = client_data; struct srp_host *host, *tmp_host; list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { char name[IB_DEVICE_NAME_MAX + 8]; snprintf(name, sizeof(name), "srp-%s-%u", dev_name(&device->dev), host->port); device_rename(&host->dev, name); } } static int srp_add_one(struct ib_device *device) { struct srp_device *srp_dev; struct ib_device_attr *attr = &device->attrs; struct srp_host *host; int mr_page_shift; u32 p; u64 max_pages_per_mr; unsigned int flags = 0; srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); if (!srp_dev) return -ENOMEM; /* * Use the smallest page size supported by the HCA, down to a * minimum of 4096 bytes. We're unlikely to build large sglists * out of smaller entries. 
*/ mr_page_shift = max(12, ffs(attr->page_size_cap) - 1); srp_dev->mr_page_size = 1 << mr_page_shift; srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1); max_pages_per_mr = attr->max_mr_size; do_div(max_pages_per_mr, srp_dev->mr_page_size); pr_debug("%s: %llu / %u = %llu <> %u\n", __func__, attr->max_mr_size, srp_dev->mr_page_size, max_pages_per_mr, SRP_MAX_PAGES_PER_MR); srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, max_pages_per_mr); srp_dev->has_fr = (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS); if (!never_register && !srp_dev->has_fr) dev_warn(&device->dev, "FR is not supported\n"); else if (!never_register && attr->max_mr_size >= 2 * srp_dev->mr_page_size) srp_dev->use_fast_reg = srp_dev->has_fr; if (never_register || !register_always || !srp_dev->has_fr) flags |= IB_PD_UNSAFE_GLOBAL_RKEY; if (srp_dev->use_fast_reg) { srp_dev->max_pages_per_mr = min_t(u32, srp_dev->max_pages_per_mr, attr->max_fast_reg_page_list_len); } srp_dev->mr_max_size = srp_dev->mr_page_size * srp_dev->max_pages_per_mr; pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n", dev_name(&device->dev), mr_page_shift, attr->max_mr_size, attr->max_fast_reg_page_list_len, srp_dev->max_pages_per_mr, srp_dev->mr_max_size); INIT_LIST_HEAD(&srp_dev->dev_list); srp_dev->dev = device; srp_dev->pd = ib_alloc_pd(device, flags); if (IS_ERR(srp_dev->pd)) { int ret = PTR_ERR(srp_dev->pd); kfree(srp_dev); return ret; } if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey; WARN_ON_ONCE(srp_dev->global_rkey == 0); } rdma_for_each_port (device, p) { host = srp_add_port(srp_dev, p); if (host) list_add_tail(&host->list, &srp_dev->dev_list); } ib_set_client_data(device, &srp_client, srp_dev); return 0; } static void srp_remove_one(struct ib_device *device, void *client_data) { struct srp_device *srp_dev; struct srp_host *host, *tmp_host; struct srp_target_port *target; srp_dev = client_data; list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { /* * Remove the add_target sysfs entry so that no new target ports * can be created. */ device_del(&host->dev); /* * Remove all target ports. */ spin_lock(&host->target_lock); list_for_each_entry(target, &host->target_list, list) srp_queue_remove_work(target); spin_unlock(&host->target_lock); /* * srp_queue_remove_work() queues a call to * srp_remove_target(). The latter function cancels * target->tl_err_work so waiting for the remove works to * finish is sufficient. 
*/ flush_workqueue(srp_remove_wq); put_device(&host->dev); } ib_dealloc_pd(srp_dev->pd); kfree(srp_dev); } static struct srp_function_template ib_srp_transport_functions = { .has_rport_state = true, .reset_timer_if_blocked = true, .reconnect_delay = &srp_reconnect_delay, .fast_io_fail_tmo = &srp_fast_io_fail_tmo, .dev_loss_tmo = &srp_dev_loss_tmo, .reconnect = srp_rport_reconnect, .rport_delete = srp_rport_delete, .terminate_rport_io = srp_terminate_io, }; static int __init srp_init_module(void) { int ret; BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36); BUILD_BUG_ON(sizeof(struct srp_cmd) != 48); BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4); BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20); BUILD_BUG_ON(sizeof(struct srp_login_req) != 64); BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56); BUILD_BUG_ON(sizeof(struct srp_rsp) != 36); if (srp_sg_tablesize) { pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n"); if (!cmd_sg_entries) cmd_sg_entries = srp_sg_tablesize; } if (!cmd_sg_entries) cmd_sg_entries = SRP_DEF_SG_TABLESIZE; if (cmd_sg_entries > 255) { pr_warn("Clamping cmd_sg_entries to 255\n"); cmd_sg_entries = 255; } if (!indirect_sg_entries) indirect_sg_entries = cmd_sg_entries; else if (indirect_sg_entries < cmd_sg_entries) { pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries); indirect_sg_entries = cmd_sg_entries; } if (indirect_sg_entries > SG_MAX_SEGMENTS) { pr_warn("Clamping indirect_sg_entries to %u\n", SG_MAX_SEGMENTS); indirect_sg_entries = SG_MAX_SEGMENTS; } srp_remove_wq = create_workqueue("srp_remove"); if (!srp_remove_wq) { ret = -ENOMEM; goto out; } ret = -ENOMEM; ib_srp_transport_template = srp_attach_transport(&ib_srp_transport_functions); if (!ib_srp_transport_template) goto destroy_wq; ret = class_register(&srp_class); if (ret) { pr_err("couldn't register class infiniband_srp\n"); goto release_tr; } ib_sa_register_client(&srp_sa_client); ret = ib_register_client(&srp_client); if (ret) { pr_err("couldn't register IB client\n"); goto unreg_sa; } out: return ret; unreg_sa: ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); release_tr: srp_release_transport(ib_srp_transport_template); destroy_wq: destroy_workqueue(srp_remove_wq); goto out; } static void __exit srp_cleanup_module(void) { ib_unregister_client(&srp_client); ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); srp_release_transport(ib_srp_transport_template); destroy_workqueue(srp_remove_wq); } module_init(srp_init_module); module_exit(srp_cleanup_module);
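/*
 * Editor's illustration, not part of ib_srp.c: a self-contained
 * userspace C sketch of two calculations performed by the driver
 * above.  (1) srp_compute_rq_tmo(): per IBTA C9-140..142, a Local ACK
 * Timeout value t encodes 4096 ns << t per transport retry; the driver
 * multiplies by 4 * retry_cnt, converts to ms and adds one second of
 * slack.  (2) srp_queuecommand()/srp_abort(): blk_mq_unique_tag()
 * packs the hardware-queue index into the upper 16 bits of the tag,
 * which is how the driver selects the RDMA channel for a command.
 * The sample values (timeout = 18, retry_cnt = 7, HZ = 250) and all
 * sketch_* names are illustrative assumptions, not driver code.
 */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_NSEC_PER_MSEC	1000000ULL
#define SKETCH_HZ		250	/* assumed tick rate, illustration only */

static unsigned long sketch_rq_tmo_jiffies(int timeout, int retry_cnt)
{
	uint64_t T_tr_ns = 4096ULL << timeout;	/* one transport retry, in ns */
	uint64_t max_compl_time_ms =
		retry_cnt * 4 * T_tr_ns / SKETCH_NSEC_PER_MSEC;

	/* msecs_to_jiffies(max_compl_time_ms + 1000), approximated */
	return (unsigned long)((max_compl_time_ms + 1000) * SKETCH_HZ / 1000);
}

/* Mirrors the blk_mq_unique_tag() encoding: hwq index in bits 31..16. */
static uint32_t sketch_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << 16) | tag;
}

int main(void)
{
	uint32_t utag = sketch_unique_tag(3, 42);

	/* timeout = 18 -> T_tr ~= 1.07 s; 7 retries -> ~30 s + 1 s slack */
	printf("rq_tmo: %lu jiffies\n", sketch_rq_tmo_jiffies(18, 7));
	printf("unique tag %#x -> hwq %u, tag %u\n",
	       utag, utag >> 16, utag & 0xffff);
	return 0;
}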
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#ifndef _LINUX_RCU_SYNC_H_
#define _LINUX_RCU_SYNC_H_

#include <linux/wait.h>
#include <linux/rcupdate.h>

/* Structure to mediate between updaters and fastpath-using readers. */
struct rcu_sync {
	int			gp_state;
	int			gp_count;
	wait_queue_head_t	gp_wait;
	struct rcu_head		cb_head;
};

/**
 * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Returns true if readers are permitted to use their fastpaths.  Must be
 * invoked within some flavor of RCU read-side critical section.
 */
static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
			 "suspicious rcu_sync_is_idle() usage");
	return !READ_ONCE(rsp->gp_state); /* GP_IDLE */
}

extern void rcu_sync_init(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);

#define __RCU_SYNC_INITIALIZER(name) {					\
		.gp_state = 0,						\
		.gp_count = 0,						\
		.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait),	\
	}

#define DEFINE_RCU_SYNC(name)	\
	struct rcu_sync name = __RCU_SYNC_INITIALIZER(name)

#endif /* _LINUX_RCU_SYNC_H_ */
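/*
 * Editor's illustration, not part of rcu_sync.h: a hedged sketch of
 * the reader-fastpath gating pattern this header supports, modeled
 * loosely on percpu_rw_semaphore.  demo_gate, demo_count and the
 * demo_*() functions are invented names; the writer-side wait for
 * in-flight fastpath readers is deliberately elided, so this shows
 * the shape of the pattern, not a complete lock.
 */
#include <linux/rcu_sync.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>

static DEFINE_RCU_SYNC(demo_gate);
static DEFINE_PER_CPU(int, demo_count);

static void demo_read_enter(void)
{
	rcu_read_lock();
	if (rcu_sync_is_idle(&demo_gate)) {
		/* Fast path: no writer active, plain per-cpu increment. */
		this_cpu_inc(demo_count);
	} else {
		/* Slow path: a real user would take a lock or an
		 * atomic reference here instead. */
	}
	rcu_read_unlock();
}

static void demo_write_enter(void)	/* process context; may sleep */
{
	/*
	 * Flips gp_state away from GP_IDLE and waits for a grace
	 * period, so every reader that could still have observed the
	 * idle state has left its RCU read-side section; new readers
	 * take the slow path until rcu_sync_exit(&demo_gate).
	 */
	rcu_sync_enter(&demo_gate);
}

static void demo_write_exit(void)
{
	rcu_sync_exit(&demo_gate);	/* re-enable the reader fastpath */
}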
2 17 1 16 1 16 2 2 1 1 12 12 18 17 3 13 17 58 58 45 3 45 4 20 1 1 1 1 1 1 3 3 3 3 7 7 7 16 16 15 1 1 16 16 15 1 7 9 4 12 18 18 7 7 7 6 1 1 6 6 2 4 11 11 11 11 2 9 9 9 1 8 9 18 15 2 1 18 3 15 2 16 7 11 7 3 12 3 2 13 16 2 19 18 1 19 18 19 19 18 1 16 3 1 18 16 3 19 19 56 19 52 19 19 19 18 24 21 3 2 53 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/splice.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
#include <linux/cred.h>
#include <linux/namei.h>
#include <linux/ratelimit.h>
#include <linux/exportfs.h>
#include "overlayfs.h"

#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)

static int ovl_ccup_set(const char *buf, const struct kernel_param *param)
{
	pr_warn("\"check_copy_up\" module option is obsolete\n");
	return 0;
}

static int ovl_ccup_get(char *buf, const struct kernel_param *param)
{
	return sprintf(buf, "N\n");
}

module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644);
MODULE_PARM_DESC(check_copy_up, "Obsolete; does nothing");

static bool ovl_must_copy_xattr(const char *name)
{
	return !strcmp(name, XATTR_POSIX_ACL_ACCESS) ||
	       !strcmp(name, XATTR_POSIX_ACL_DEFAULT) ||
	       !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
}

static int ovl_copy_acl(struct ovl_fs *ofs, const struct path *path,
			struct dentry *dentry, const char *acl_name)
{
	int err;
	struct posix_acl *clone, *real_acl = NULL;

	real_acl = ovl_get_acl_path(path, acl_name, false);
	if (!real_acl)
		return 0;

	if (IS_ERR(real_acl)) {
		err = PTR_ERR(real_acl);
		if (err == -ENODATA || err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	clone = posix_acl_clone(real_acl, GFP_KERNEL);
	posix_acl_release(real_acl); /* release original acl */
	if (!clone)
		return -ENOMEM;

	err = ovl_do_set_acl(ofs, dentry, acl_name, clone);

	/* release cloned acl */
	posix_acl_release(clone);
	return err;
}

int ovl_copy_xattr(struct super_block *sb, const struct path *oldpath,
		   struct dentry *new)
{
	struct dentry *old = oldpath->dentry;
	ssize_t list_size, size, value_size = 0;
	char *buf, *name, *value = NULL;
	int error = 0;
	size_t slen;

	if (!old->d_inode->i_op->listxattr || !new->d_inode->i_op->listxattr)
		return 0;

	list_size = vfs_listxattr(old, NULL, 0);
	if (list_size <= 0) {
		if (list_size == -EOPNOTSUPP)
			return 0;
		return list_size;
	}

	buf = kvzalloc(list_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	list_size = vfs_listxattr(old, buf, list_size);
	if (list_size <= 0) {
		error = list_size;
		goto out;
	}

	for (name = buf; list_size; name += slen) {
		slen = strnlen(name, list_size) + 1;

		/* underlying fs providing us with a broken xattr list? */
		if (WARN_ON(slen > list_size)) {
			error = -EIO;
			break;
		}
		list_size -= slen;

		if (ovl_is_private_xattr(sb, name))
			continue;

		error = security_inode_copy_up_xattr(old, name);
		if (error == -ECANCELED) {
			error = 0;
			continue; /* Discard */
		}
		if (error < 0 && error != -EOPNOTSUPP)
			break;

		if (is_posix_acl_xattr(name)) {
			error = ovl_copy_acl(OVL_FS(sb), oldpath, new, name);
			if (!error)
				continue;
			/* POSIX ACLs must be copied. */
			break;
		}

retry:
		size = ovl_do_getxattr(oldpath, name, value, value_size);
		if (size == -ERANGE)
			size = ovl_do_getxattr(oldpath, name, NULL, 0);

		if (size < 0) {
			error = size;
			break;
		}

		if (size > value_size) {
			void *new;

			new = kvmalloc(size, GFP_KERNEL);
			if (!new) {
				error = -ENOMEM;
				break;
			}
			kvfree(value);
			value = new;
			value_size = size;
			goto retry;
		}

		error = ovl_do_setxattr(OVL_FS(sb), new, name, value, size, 0);
		if (error) {
			if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
				break;

			/* Ignore failure to copy unknown xattrs */
			error = 0;
		}
	}
	kvfree(value);
out:
	kvfree(buf);
	return error;
}
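The retry loop above re-probes the value size when -ERANGE indicates the buffer was too small, since an xattr can change size between calls. The same grow-and-retry dance applies in userspace with lgetxattr(2); a minimal sketch (the helper name read_xattr is ours, not part of any API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

/* Read an xattr of unknown size: probe, allocate, retry if it grew. */
static void *read_xattr(const char *path, const char *name, ssize_t *out_len)
{
	for (;;) {
		void *buf;
		ssize_t n = lgetxattr(path, name, NULL, 0); /* size probe */

		if (n < 0)
			return NULL;
		buf = malloc(n ? n : 1);
		if (!buf)
			return NULL;
		n = lgetxattr(path, name, buf, n);
		if (n >= 0) {
			*out_len = n;
			return buf;
		}
		free(buf);
		if (errno != ERANGE)	/* ERANGE: grew between calls, retry */
			return NULL;
	}
}

int main(int argc, char **argv)
{
	ssize_t len;
	void *val;

	if (argc != 3)
		return 2;
	val = read_xattr(argv[1], argv[2], &len);
	if (!val)
		return 1;
	printf("%s: %zd bytes\n", argv[2], len);
	free(val);
	return 0;
}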
static int ovl_copy_fileattr(struct inode *inode, const struct path *old,
			     const struct path *new)
{
	struct file_kattr oldfa = { .flags_valid = true };
	struct file_kattr newfa = { .flags_valid = true };
	int err;

	err = ovl_real_fileattr_get(old, &oldfa);
	if (err) {
		/* Ntfs-3g returns -EINVAL for "no fileattr support" */
		if (err == -ENOTTY || err == -EINVAL)
			return 0;
		pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
			old->dentry, err);
		return err;
	}

	/*
	 * We cannot set immutable and append-only flags on upper inode,
	 * because we would not be able to link upper inode to upper dir
	 * nor set overlay private xattr on upper inode.
	 * Store these flags in overlay.protattr xattr instead.
	 */
	if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
		err = ovl_set_protattr(inode, new->dentry, &oldfa);
		if (err == -EPERM)
			pr_warn_once("copying fileattr: no xattr on upper\n");
		else if (err)
			return err;
	}

	/* Don't bother copying flags if none are set */
	if (!(oldfa.flags & OVL_COPY_FS_FLAGS_MASK))
		return 0;

	err = ovl_real_fileattr_get(new, &newfa);
	if (err) {
		/*
		 * Returning an error if upper doesn't support fileattr will
		 * result in a regression, so revert to the old behavior.
		 */
		if (err == -ENOTTY || err == -EINVAL) {
			pr_warn_once("copying fileattr: no support on upper\n");
			return 0;
		}
		pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n",
			new->dentry, err);
		return err;
	}

	BUILD_BUG_ON(OVL_COPY_FS_FLAGS_MASK & ~FS_COMMON_FL);
	newfa.flags &= ~OVL_COPY_FS_FLAGS_MASK;
	newfa.flags |= (oldfa.flags & OVL_COPY_FS_FLAGS_MASK);

	BUILD_BUG_ON(OVL_COPY_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON);
	newfa.fsx_xflags &= ~OVL_COPY_FSX_FLAGS_MASK;
	newfa.fsx_xflags |= (oldfa.fsx_xflags & OVL_COPY_FSX_FLAGS_MASK);

	return ovl_real_fileattr_set(new, &newfa);
}

static int ovl_verify_area(loff_t pos, loff_t pos2, loff_t len, loff_t totlen)
{
	loff_t tmp;

	if (pos != pos2)
		return -EIO;
	if (pos < 0 || len < 0 || totlen < 0)
		return -EIO;
	if (check_add_overflow(pos, len, &tmp))
		return -EIO;
	return 0;
}

static int ovl_sync_file(const struct path *path)
{
	struct file *new_file;
	int err;

	new_file = ovl_path_open(path, O_LARGEFILE | O_RDONLY);
	if (IS_ERR(new_file))
		return PTR_ERR(new_file);

	err = vfs_fsync(new_file, 0);
	fput(new_file);

	return err;
}
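ovl_verify_area() rejects ranges whose end position would wrap loff_t, using the kernel's check_add_overflow() helper. Outside the kernel the same guard can be written with the GCC/Clang __builtin_add_overflow() builtin; a hedged standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Like ovl_verify_area(): reject a range whose end would overflow. */
static bool range_ok(int64_t pos, int64_t len)
{
	int64_t end;

	if (pos < 0 || len < 0)
		return false;
	/* GCC/Clang builtin; evaluates to true when pos + len wraps */
	return !__builtin_add_overflow(pos, len, &end);
}

int main(void)
{
	printf("%d\n", range_ok(INT64_MAX - 4, 8)); /* 0: would overflow */
	printf("%d\n", range_ok(1 << 20, 4096));    /* 1: fine */
	return 0;
}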
static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
			    struct file *new_file, loff_t len, bool datasync)
{
	struct path datapath;
	struct file *old_file;
	loff_t old_pos = 0;
	loff_t new_pos = 0;
	loff_t cloned;
	loff_t data_pos = -1;
	loff_t hole_len;
	bool skip_hole = false;
	int error = 0;

	ovl_path_lowerdata(dentry, &datapath);
	if (WARN_ON_ONCE(datapath.dentry == NULL) ||
	    WARN_ON_ONCE(len < 0))
		return -EIO;

	old_file = ovl_path_open(&datapath, O_LARGEFILE | O_RDONLY);
	if (IS_ERR(old_file))
		return PTR_ERR(old_file);

	/* Try to use clone_file_range to clone up within the same fs */
	cloned = vfs_clone_file_range(old_file, 0, new_file, 0, len, 0);
	if (cloned == len)
		goto out_fput;

	/* Couldn't clone, so now we try to copy the data */
	error = rw_verify_area(READ, old_file, &old_pos, len);
	if (!error)
		error = rw_verify_area(WRITE, new_file, &new_pos, len);
	if (error)
		goto out_fput;

	/* Check if lower fs supports seek operation */
	if (old_file->f_mode & FMODE_LSEEK)
		skip_hole = true;

	while (len) {
		size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
		ssize_t bytes;

		if (len < this_len)
			this_len = len;

		if (signal_pending_state(TASK_KILLABLE, current)) {
			error = -EINTR;
			break;
		}

		/*
		 * Filling holes with zeroes would waste disk space and slow
		 * down the copy-up, so we optimize holes away during copy-up.
		 * This relies on the SEEK_DATA implementation in the lower fs;
		 * if the lower fs does not support it, copy-up behaves as
		 * before.
		 *
		 * The hole detection logic is as follows:
		 * when the next data position is larger than the current
		 * position, we skip that hole; otherwise we copy data in
		 * chunks of OVL_COPY_UP_CHUNK_SIZE. This may not recognize
		 * all kinds of holes, and sometimes only skips part of a
		 * hole area, but it is good enough for most use cases.
		 *
		 * We do not hold upper sb_writers throughout the loop to avert
		 * lockdep warning with llseek of lower file in nested overlay:
		 * - upper sb_writers
		 * -- lower ovl_inode_lock (ovl_llseek)
		 */
		if (skip_hole && data_pos < old_pos) {
			data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
			if (data_pos > old_pos) {
				hole_len = data_pos - old_pos;
				len -= hole_len;
				old_pos = new_pos = data_pos;
				continue;
			} else if (data_pos == -ENXIO) {
				break;
			} else if (data_pos < 0) {
				skip_hole = false;
			}
		}

		error = ovl_verify_area(old_pos, new_pos, this_len, len);
		if (error)
			break;

		bytes = do_splice_direct(old_file, &old_pos,
					 new_file, &new_pos,
					 this_len, SPLICE_F_MOVE);
		if (bytes <= 0) {
			error = bytes;
			break;
		}
		WARN_ON(old_pos != new_pos);

		len -= bytes;
	}

	/* call fsync once, either now or later along with metadata */
	if (!error && ovl_should_sync(ofs) && datasync)
		error = vfs_fsync(new_file, 0);
out_fput:
	fput(old_file);
	return error;
}

static int ovl_set_size(struct ovl_fs *ofs,
			struct dentry *upperdentry, struct kstat *stat)
{
	struct iattr attr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = stat->size,
	};

	return ovl_do_notify_change(ofs, upperdentry, &attr);
}

static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry,
			      struct kstat *stat)
{
	struct iattr attr = {
		.ia_valid = ATTR_ATIME | ATTR_MTIME |
			    ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME,
		.ia_atime = stat->atime,
		.ia_mtime = stat->mtime,
	};

	return ovl_do_notify_change(ofs, upperdentry, &attr);
}

int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry,
		 struct kstat *stat)
{
	int err = 0;

	if (!S_ISLNK(stat->mode)) {
		struct iattr attr = {
			.ia_valid = ATTR_MODE,
			.ia_mode = stat->mode,
		};
		err = ovl_do_notify_change(ofs, upperdentry, &attr);
	}
	if (!err) {
		struct iattr attr = {
			.ia_valid = ATTR_UID | ATTR_GID,
			.ia_vfsuid = VFSUIDT_INIT(stat->uid),
			.ia_vfsgid = VFSGIDT_INIT(stat->gid),
		};
		err = ovl_do_notify_change(ofs, upperdentry, &attr);
	}
	if (!err)
		ovl_set_timestamps(ofs, upperdentry, stat);

	return err;
}
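The SEEK_DATA-based hole skipping above has a direct userspace counterpart: lseek(2) with SEEK_DATA/SEEK_HOLE walks the data extents of a sparse file, and returns -1 with errno == ENXIO when no data remains, mirroring the -ENXIO break in the loop. A small standalone sketch:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Print the data extents of a (possibly sparse) file, skipping holes the
 * same way the copy-up loop above does with vfs_llseek(SEEK_DATA). */
int main(int argc, char **argv)
{
	off_t pos = 0, data, hole;
	int fd;

	if (argc != 2)
		return 2;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	for (;;) {
		data = lseek(fd, pos, SEEK_DATA);
		if (data < 0)
			break;		/* ENXIO: no data past pos */
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: %lld..%lld\n", (long long)data, (long long)hole);
		pos = hole;
	}
	close(fd);
	return 0;
}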
struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode,
				  bool is_upper)
{
	struct ovl_fh *fh;
	int fh_type, dwords;
	int buflen = MAX_HANDLE_SZ;
	uuid_t *uuid = &realinode->i_sb->s_uuid;
	int err;

	/* Make sure the real fid stays 32bit aligned */
	BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4);
	BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255);

	fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL);
	if (!fh)
		return ERR_PTR(-ENOMEM);

	/*
	 * We encode a non-connectable file handle for non-dir, because we
	 * only need to find the lower inode number and we don't want to pay
	 * the price of reconnecting the dentry.
	 */
	dwords = buflen >> 2;
	fh_type = exportfs_encode_inode_fh(realinode, (void *)fh->fb.fid,
					   &dwords, NULL, 0);
	buflen = (dwords << 2);

	err = -EIO;
	if (fh_type < 0 || fh_type == FILEID_INVALID ||
	    WARN_ON(buflen > MAX_HANDLE_SZ))
		goto out_err;

	fh->fb.version = OVL_FH_VERSION;
	fh->fb.magic = OVL_FH_MAGIC;
	fh->fb.type = fh_type;
	fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN;
	/*
	 * When we want to decode an overlay dentry from this handle
	 * and all layers are on the same fs, if we get a disconnected real
	 * dentry when we decode fid, the only way to tell if we should assign
	 * it to upperdentry or to lowerstack is by checking this flag.
	 */
	if (is_upper)
		fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER;
	fh->fb.len = sizeof(fh->fb) + buflen;
	if (ovl_origin_uuid(ofs))
		fh->fb.uuid = *uuid;

	return fh;

out_err:
	kfree(fh);
	return ERR_PTR(err);
}

struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
{
	/*
	 * When lower layer doesn't support export operations store a 'null' fh,
	 * so we can use the overlay.origin xattr to distinguish between a copy
	 * up and a pure upper inode.
	 */
	if (!ovl_can_decode_fh(origin->d_sb))
		return NULL;

	return ovl_encode_real_fh(ofs, d_inode(origin), false);
}

int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
		      struct dentry *upper)
{
	int err;

	/*
	 * Do not fail when upper doesn't support xattrs.
	 */
	err = ovl_check_setxattr(ofs, upper, OVL_XATTR_ORIGIN, fh->buf,
				 fh ? fh->fb.len : 0, 0);

	/* Ignore -EPERM from setting "user.*" on symlink/special */
	return err == -EPERM ? 0 : err;
}

/* Store file handle of @upper dir in @index dir entry */
static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
			    struct dentry *index)
{
	const struct ovl_fh *fh;
	int err;

	fh = ovl_encode_real_fh(ofs, d_inode(upper), true);
	if (IS_ERR(fh))
		return PTR_ERR(fh);

	err = ovl_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);

	kfree(fh);
	return err;
}

/*
 * Create and install index entry.
 */
static int ovl_create_index(struct dentry *dentry, const struct ovl_fh *fh,
			    struct dentry *upper)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct dentry *indexdir = ovl_indexdir(dentry->d_sb);
	struct dentry *index = NULL;
	struct dentry *temp = NULL;
	struct qstr name = { };
	int err;

	/*
	 * For now this is only used for creating index entry for directories,
	 * because non-dir are copied up directly to index and then hardlinked
	 * to upper dir.
	 *
	 * TODO: implement create index for non-dir, so we can call it when
	 * encoding file handle for non-dir in case index does not exist.
*/ if (WARN_ON(!d_is_dir(dentry))) return -EIO; /* Directory not expected to be indexed before copy up */ if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry)))) return -EIO; err = ovl_get_index_name_fh(fh, &name); if (err) return err; temp = ovl_create_temp(ofs, indexdir, OVL_CATTR(S_IFDIR | 0)); err = PTR_ERR(temp); if (IS_ERR(temp)) goto free_name; err = ovl_set_upper_fh(ofs, upper, temp); if (err) goto out; err = ovl_parent_lock(indexdir, temp); if (err) goto out; index = ovl_lookup_upper(ofs, name.name, indexdir, name.len); if (IS_ERR(index)) { err = PTR_ERR(index); } else { err = ovl_do_rename(ofs, indexdir, temp, indexdir, index, 0); dput(index); } ovl_parent_unlock(indexdir); out: if (err) ovl_cleanup(ofs, indexdir, temp); dput(temp); free_name: kfree(name.name); return err; } struct ovl_copy_up_ctx { struct dentry *parent; struct dentry *dentry; struct path lowerpath; struct kstat stat; struct kstat pstat; const char *link; struct dentry *destdir; struct qstr destname; struct dentry *workdir; const struct ovl_fh *origin_fh; bool origin; bool indexed; bool metacopy; bool metacopy_digest; bool metadata_fsync; }; static int ovl_link_up(struct ovl_copy_up_ctx *c) { int err; struct dentry *upper; struct dentry *upperdir = ovl_dentry_upper(c->parent); struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *udir = d_inode(upperdir); ovl_start_write(c->dentry); /* Mark parent "impure" because it may now contain non-pure upper */ err = ovl_set_impure(c->parent, upperdir); if (err) goto out; err = ovl_set_nlink_lower(c->dentry); if (err) goto out; inode_lock_nested(udir, I_MUTEX_PARENT); upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir, c->dentry->d_name.len); err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper); if (!err) { /* Restore timestamps on parent (best effort) */ ovl_set_timestamps(ofs, upperdir, &c->pstat); ovl_dentry_set_upper_alias(c->dentry); ovl_dentry_update_reval(c->dentry, upper); } dput(upper); } inode_unlock(udir); if (err) goto out; err = ovl_set_nlink_upper(c->dentry); out: ovl_end_write(c->dentry); return err; } static int ovl_copy_up_data(struct ovl_copy_up_ctx *c, const struct path *temp) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct file *new_file; int err; if (!S_ISREG(c->stat.mode) || c->metacopy || !c->stat.size) return 0; new_file = ovl_path_open(temp, O_LARGEFILE | O_WRONLY); if (IS_ERR(new_file)) return PTR_ERR(new_file); err = ovl_copy_up_file(ofs, c->dentry, new_file, c->stat.size, !c->metadata_fsync); fput(new_file); return err; } static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *inode = d_inode(c->dentry); struct path upperpath = { .mnt = ovl_upper_mnt(ofs), .dentry = temp }; int err; err = ovl_copy_xattr(c->dentry->d_sb, &c->lowerpath, temp); if (err) return err; if (inode->i_flags & OVL_FATTR_I_FLAGS_MASK && (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) { /* * Copy the fileattr inode flags that are the source of already * copied i_flags */ err = ovl_copy_fileattr(inode, &c->lowerpath, &upperpath); if (err) return err; } /* * Store identifier of lower inode in upper inode xattr to * allow lookup of the copy up origin inode. * * Don't set origin when we are breaking the association with a lower * hard link. 
*/ if (c->origin) { err = ovl_set_origin_fh(ofs, c->origin_fh, temp); if (err) return err; } if (c->metacopy) { struct path lowerdatapath; struct ovl_metacopy metacopy_data = OVL_METACOPY_INIT; ovl_path_lowerdata(c->dentry, &lowerdatapath); if (WARN_ON_ONCE(lowerdatapath.dentry == NULL)) return -EIO; err = ovl_get_verity_digest(ofs, &lowerdatapath, &metacopy_data); if (err) return err; if (metacopy_data.digest_algo) c->metacopy_digest = true; err = ovl_set_metacopy_xattr(ofs, temp, &metacopy_data); if (err) return err; } inode_lock(temp->d_inode); if (S_ISREG(c->stat.mode)) err = ovl_set_size(ofs, temp, &c->stat); if (!err) err = ovl_set_attr(ofs, temp, &c->stat); inode_unlock(temp->d_inode); /* fsync metadata before moving it into upper dir */ if (!err && ovl_should_sync(ofs) && c->metadata_fsync) err = ovl_sync_file(&upperpath); return err; } struct ovl_cu_creds { const struct cred *old; struct cred *new; }; static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc) { int err; cc->old = cc->new = NULL; err = security_inode_copy_up(dentry, &cc->new); if (err < 0) return err; if (cc->new) cc->old = override_creds(cc->new); return 0; } static void ovl_revert_cu_creds(struct ovl_cu_creds *cc) { if (cc->new) { revert_creds(cc->old); put_cred(cc->new); } } /* * Copyup using workdir to prepare temp file. Used when copying up directories, * special files or when upper fs doesn't support O_TMPFILE. */ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *inode; struct path path = { .mnt = ovl_upper_mnt(ofs) }; struct dentry *temp, *upper, *trap; struct ovl_cu_creds cc; int err; struct ovl_cattr cattr = { /* Can't properly set mode on creation because of the umask */ .mode = c->stat.mode & S_IFMT, .rdev = c->stat.rdev, .link = c->link }; err = ovl_prep_cu_creds(c->dentry, &cc); if (err) return err; ovl_start_write(c->dentry); temp = ovl_create_temp(ofs, c->workdir, &cattr); ovl_end_write(c->dentry); ovl_revert_cu_creds(&cc); if (IS_ERR(temp)) return PTR_ERR(temp); /* * Copy up data first and then xattrs. Writing data after * xattrs will remove security.capability xattr automatically. */ path.dentry = temp; err = ovl_copy_up_data(c, &path); ovl_start_write(c->dentry); if (err) goto cleanup_unlocked; if (S_ISDIR(c->stat.mode) && c->indexed) { err = ovl_create_index(c->dentry, c->origin_fh, temp); if (err) goto cleanup_unlocked; } /* * We cannot hold lock_rename() throughout this helper, because of * lock ordering with sb_writers, which shouldn't be held when calling * ovl_copy_up_data(), so lock workdir and destdir and make sure that * temp wasn't moved before copy up completion or cleanup. */ trap = lock_rename(c->workdir, c->destdir); if (trap || temp->d_parent != c->workdir) { /* temp or workdir moved underneath us? 
abort without cleanup */ dput(temp); err = -EIO; if (!IS_ERR(trap)) unlock_rename(c->workdir, c->destdir); goto out; } err = ovl_copy_up_metadata(c, temp); if (err) goto cleanup; upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir, c->destname.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto cleanup; err = ovl_do_rename(ofs, c->workdir, temp, c->destdir, upper, 0); unlock_rename(c->workdir, c->destdir); dput(upper); if (err) goto cleanup_unlocked; inode = d_inode(c->dentry); if (c->metacopy_digest) ovl_set_flag(OVL_HAS_DIGEST, inode); else ovl_clear_flag(OVL_HAS_DIGEST, inode); ovl_clear_flag(OVL_VERIFIED_DIGEST, inode); if (!c->metacopy) ovl_set_upperdata(inode); ovl_inode_update(inode, temp); if (S_ISDIR(inode->i_mode)) ovl_set_flag(OVL_WHITEOUTS, inode); out: ovl_end_write(c->dentry); return err; cleanup: unlock_rename(c->workdir, c->destdir); cleanup_unlocked: ovl_cleanup(ofs, c->workdir, temp); dput(temp); goto out; } /* Copyup using O_TMPFILE which does not require cross dir locking */ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *udir = d_inode(c->destdir); struct dentry *temp, *upper; struct file *tmpfile; struct ovl_cu_creds cc; int err; err = ovl_prep_cu_creds(c->dentry, &cc); if (err) return err; ovl_start_write(c->dentry); tmpfile = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode); ovl_end_write(c->dentry); ovl_revert_cu_creds(&cc); if (IS_ERR(tmpfile)) return PTR_ERR(tmpfile); temp = tmpfile->f_path.dentry; if (!c->metacopy && c->stat.size) { err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size, !c->metadata_fsync); if (err) goto out_fput; } ovl_start_write(c->dentry); err = ovl_copy_up_metadata(c, temp); if (err) goto out; inode_lock_nested(udir, I_MUTEX_PARENT); upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir, c->destname.len); err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, temp, udir, upper); dput(upper); } inode_unlock(udir); if (err) goto out; if (c->metacopy_digest) ovl_set_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); else ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry)); if (!c->metacopy) ovl_set_upperdata(d_inode(c->dentry)); ovl_inode_update(d_inode(c->dentry), dget(temp)); out: ovl_end_write(c->dentry); out_fput: fput(tmpfile); return err; } /* * Copy up a single dentry * * All renames start with copy up of source if necessary. The actual * rename will only proceed once the copy up was successful. Copy up uses * upper parent i_mutex for exclusion. Since rename can change d_parent it * is possible that the copy up will lock the old parent. At that point * the file will have already been copied up anyway. */ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c) { int err; struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct dentry *origin = c->lowerpath.dentry; struct ovl_fh *fh = NULL; bool to_index = false; /* * Indexed non-dir is copied up directly to the index entry and then * hardlinked to upper dir. Indexed dir is copied up to indexdir, * then index entry is created and then copied up dir installed. * Copying dir up to indexdir instead of workdir simplifies locking. 
 */
	if (ovl_need_index(c->dentry)) {
		c->indexed = true;
		if (S_ISDIR(c->stat.mode))
			c->workdir = ovl_indexdir(c->dentry->d_sb);
		else
			to_index = true;
	}

	if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) {
		fh = ovl_get_origin_fh(ofs, origin);
		if (IS_ERR(fh))
			return PTR_ERR(fh);

		/* origin_fh may be NULL */
		c->origin_fh = fh;
		c->origin = true;
	}

	if (to_index) {
		c->destdir = ovl_indexdir(c->dentry->d_sb);
		err = ovl_get_index_name(ofs, origin, &c->destname);
		if (err)
			goto out_free_fh;
	} else if (WARN_ON(!c->parent)) {
		/* Disconnected dentry must be copied up to index dir */
		err = -EIO;
		goto out_free_fh;
	} else {
		/*
		 * c->dentry->d_name is stabilized by ovl_copy_up_start(),
		 * because if we got here, it means that c->dentry has no upper
		 * alias and changing ->d_name means going through ovl_rename()
		 * that will call ovl_copy_up() on source and target dentry.
		 */
		c->destname = c->dentry->d_name;
		/*
		 * Mark parent "impure" because it may now contain non-pure
		 * upper
		 */
		ovl_start_write(c->dentry);
		err = ovl_set_impure(c->parent, c->destdir);
		ovl_end_write(c->dentry);
		if (err)
			goto out_free_fh;
	}

	/* Should we copyup with O_TMPFILE or with workdir? */
	if (S_ISREG(c->stat.mode) && ofs->tmpfile)
		err = ovl_copy_up_tmpfile(c);
	else
		err = ovl_copy_up_workdir(c);
	if (err)
		goto out;

	if (c->indexed)
		ovl_set_flag(OVL_INDEX, d_inode(c->dentry));

	ovl_start_write(c->dentry);
	if (to_index) {
		/* Initialize nlink for copy up of disconnected dentry */
		err = ovl_set_nlink_upper(c->dentry);
	} else {
		struct inode *udir = d_inode(c->destdir);

		/* Restore timestamps on parent (best effort) */
		inode_lock(udir);
		ovl_set_timestamps(ofs, c->destdir, &c->pstat);
		inode_unlock(udir);

		ovl_dentry_set_upper_alias(c->dentry);
		ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
	}
	ovl_end_write(c->dentry);

out:
	if (to_index)
		kfree(c->destname.name);
out_free_fh:
	kfree(fh);
	return err;
}

static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode,
				  int flags)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);

	if (!ofs->config.metacopy)
		return false;

	if (!S_ISREG(mode))
		return false;

	if (flags && ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC)))
		return false;

	/* Fall back to full copy if no fsverity on source data and we require verity */
	if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) {
		struct path lowerdata;

		ovl_path_lowerdata(dentry, &lowerdata);
		if (WARN_ON_ONCE(lowerdata.dentry == NULL) ||
		    ovl_ensure_verity_loaded(&lowerdata) ||
		    !fsverity_active(d_inode(lowerdata.dentry))) {
			return false;
		}
	}

	return true;
}

static ssize_t ovl_getxattr_value(const struct path *path, char *name,
				  char **value)
{
	ssize_t res;
	char *buf;

	res = ovl_do_getxattr(path, name, NULL, 0);
	if (res == -ENODATA || res == -EOPNOTSUPP)
		res = 0;

	if (res > 0) {
		buf = kzalloc(res, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		res = ovl_do_getxattr(path, name, buf, res);
		if (res < 0)
			kfree(buf);
		else
			*value = buf;
	}
	return res;
}

/* Copy up data of an inode which was copied up metadata only in the past. */
static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
{
	struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
	struct path upperpath;
	int err;
	char *capability = NULL;
	ssize_t cap_size;

	ovl_path_upper(c->dentry, &upperpath);
	if (WARN_ON(upperpath.dentry == NULL))
		return -EIO;

	if (c->stat.size) {
		err = cap_size = ovl_getxattr_value(&upperpath, XATTR_NAME_CAPS,
						    &capability);
		if (cap_size < 0)
			goto out;
	}

	err = ovl_copy_up_data(c, &upperpath);
	if (err)
		goto out_free;

	/*
	 * Writing to upper file will clear security.capability xattr.
We * don't want that to happen for normal copy-up operation. */ ovl_start_write(c->dentry); if (capability) { err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS, capability, cap_size, 0); } if (!err) { err = ovl_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY); } ovl_end_write(c->dentry); if (err) goto out_free; ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry)); ovl_set_upperdata(d_inode(c->dentry)); out_free: kfree(capability); out: return err; } static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, int flags) { int err; DEFINE_DELAYED_CALL(done); struct path parentpath; struct ovl_copy_up_ctx ctx = { .parent = parent, .dentry = dentry, .workdir = ovl_workdir(dentry), }; if (WARN_ON(!ctx.workdir)) return -EROFS; ovl_path_lower(dentry, &ctx.lowerpath); err = vfs_getattr(&ctx.lowerpath, &ctx.stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) return err; if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) || !kgid_has_mapping(current_user_ns(), ctx.stat.gid)) return -EOVERFLOW; /* * With metacopy disabled, we fsync after final metadata copyup, for * both regular files and directories to get atomic copyup semantics * on filesystems that do not use strict metadata ordering (e.g. ubifs). * * With metacopy enabled we want to avoid fsync on all meta copyup * that will hurt performance of workloads such as chown -R, so we * only fsync on data copyup as legacy behavior. */ ctx.metadata_fsync = !OVL_FS(dentry->d_sb)->config.metacopy && (S_ISREG(ctx.stat.mode) || S_ISDIR(ctx.stat.mode)); ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags); if (parent) { ovl_path_upper(parent, &parentpath); ctx.destdir = parentpath.dentry; err = vfs_getattr(&parentpath, &ctx.pstat, STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT); if (err) return err; } /* maybe truncate regular file. this has no effect on dirs */ if (flags & O_TRUNC) ctx.stat.size = 0; if (S_ISLNK(ctx.stat.mode)) { ctx.link = vfs_get_link(ctx.lowerpath.dentry, &done); if (IS_ERR(ctx.link)) return PTR_ERR(ctx.link); } err = ovl_copy_up_start(dentry, flags); /* err < 0: interrupted, err > 0: raced with another copy-up */ if (unlikely(err)) { if (err > 0) err = 0; } else { if (!ovl_dentry_upper(dentry)) err = ovl_do_copy_up(&ctx); if (!err && parent && !ovl_dentry_has_upper_alias(dentry)) err = ovl_link_up(&ctx); if (!err && ovl_dentry_needs_data_copy_up_locked(dentry, flags)) err = ovl_copy_up_meta_inode_data(&ctx); ovl_copy_up_end(dentry); } do_delayed_call(&done); return err; } static int ovl_copy_up_flags(struct dentry *dentry, int flags) { int err = 0; const struct cred *old_cred; bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED); /* * With NFS export, copy up can get called for a disconnected non-dir. * In this case, we will copy up lower inode to index dir without * linking it to upper dir. */ if (WARN_ON(disconnected && d_is_dir(dentry))) return -EIO; /* * We may not need lowerdata if we are only doing metacopy up, but it is * not very important to optimize this case, so do lazy lowerdata lookup * before any copy up, so we can do it before taking ovl_inode_lock(). 
*/ err = ovl_verify_lowerdata(dentry); if (err) return err; old_cred = ovl_override_creds(dentry->d_sb); while (!err) { struct dentry *next; struct dentry *parent = NULL; if (ovl_already_copied_up(dentry, flags)) break; next = dget(dentry); /* find the topmost dentry not yet copied up */ for (; !disconnected;) { parent = dget_parent(next); if (ovl_dentry_upper(parent)) break; dput(next); next = parent; } err = ovl_copy_up_one(parent, next, flags); dput(parent); dput(next); } ovl_revert_creds(old_cred); return err; } static bool ovl_open_need_copy_up(struct dentry *dentry, int flags) { /* Copy up of disconnected dentry does not set upper alias */ if (ovl_already_copied_up(dentry, flags)) return false; if (special_file(d_inode(dentry)->i_mode)) return false; if (!ovl_open_flags_need_copy_up(flags)) return false; return true; } int ovl_maybe_copy_up(struct dentry *dentry, int flags) { if (!ovl_open_need_copy_up(dentry, flags)) return 0; return ovl_copy_up_flags(dentry, flags); } int ovl_copy_up_with_data(struct dentry *dentry) { return ovl_copy_up_flags(dentry, O_WRONLY); } int ovl_copy_up(struct dentry *dentry) { return ovl_copy_up_flags(dentry, 0); }
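As a side note, the O_TMPFILE technique behind ovl_copy_up_tmpfile() above is also available to userspace: create an anonymous file, fill it, then give it a name with linkat(2), so no observer ever sees a half-written file. A hedged sketch (the paths are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char proc[64];
	/* anonymous file in /tmp; no directory entry exists yet */
	int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);

	if (fd < 0)
		return 1;
	if (write(fd, "payload\n", 8) != 8)
		return 1;
	/* publish it atomically under its final name */
	snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, proc, AT_FDCWD, "/tmp/published",
		   AT_SYMLINK_FOLLOW))
		return 1;
	close(fd);
	return 0;
}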
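The file-handle encoding done by ovl_encode_real_fh() above via exportfs_encode_inode_fh() is exposed to userspace as name_to_handle_at(2). A sketch of the usual two-call pattern: probe with handle_bytes = 0 (the kernel reports the required size on EOVERFLOW), then retry with a properly sized buffer:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	struct file_handle *fh;
	int mount_id;
	unsigned int i;

	if (argc != 2)
		return 2;

	/* First call fails with EOVERFLOW and fills in fh->handle_bytes */
	fh = malloc(sizeof(*fh));
	if (!fh)
		return 1;
	fh->handle_bytes = 0;
	if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) != -1 ||
	    errno != EOVERFLOW)
		return 1;

	fh = realloc(fh, sizeof(*fh) + fh->handle_bytes);
	if (!fh || name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0))
		return 1;

	printf("type %d, %u bytes:", fh->handle_type, fh->handle_bytes);
	for (i = 0; i < fh->handle_bytes; i++)
		printf(" %02x", fh->f_handle[i]);
	printf("\n");
	free(fh);
	return 0;
}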
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mac80211 - channel management
 * Copyright 2020 - 2025 Intel Corporation
 */

#include <linux/nl80211.h>
#include <linux/export.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { struct ieee80211_link_data *link; int num = 0; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) num++; return num; } static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { struct ieee80211_link_data *link; int num = 0; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) num++; return num; } int ieee80211_chanctx_refcount(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { return ieee80211_chanctx_num_assigned(local, ctx) + ieee80211_chanctx_num_reserved(local, ctx); } static int ieee80211_num_chanctx(struct ieee80211_local *local, int radio_idx) { struct ieee80211_chanctx *ctx; int num = 0; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(ctx, &local->chanctx_list, list) { if (radio_idx >= 0 && ctx->conf.radio_idx != radio_idx) continue; num++; } return num; } static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local, int radio_idx) { lockdep_assert_wiphy(local->hw.wiphy); return ieee80211_num_chanctx(local, radio_idx) < ieee80211_max_num_channels(local, radio_idx); } static struct ieee80211_chanctx * ieee80211_link_get_chanctx(struct ieee80211_link_data *link) { struct ieee80211_local *local __maybe_unused = link->sdata->local; struct ieee80211_chanctx_conf *conf; conf = rcu_dereference_protected(link->conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); if (!conf) return NULL; return container_of(conf, struct ieee80211_chanctx, conf); } bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a, const struct ieee80211_chan_req *b) { if (!cfg80211_chandef_identical(&a->oper, &b->oper)) return false; if (!a->ap.chan && !b->ap.chan) return true; return cfg80211_chandef_identical(&a->ap, &b->ap); } static const struct ieee80211_chan_req * ieee80211_chanreq_compatible(const struct ieee80211_chan_req *a, const struct ieee80211_chan_req *b, struct ieee80211_chan_req *tmp) { const struct cfg80211_chan_def *compat; if (a->ap.chan && b->ap.chan && !cfg80211_chandef_identical(&a->ap, &b->ap)) return NULL; compat = cfg80211_chandef_compatible(&a->oper, &b->oper); if (!compat) return NULL; /* Note: later code assumes this always fills & returns tmp if compat */ tmp->oper = *compat; tmp->ap = a->ap.chan ? 
a->ap : b->ap; return tmp; } static const struct ieee80211_chan_req * ieee80211_chanctx_compatible(struct ieee80211_chanctx *ctx, const struct ieee80211_chan_req *req, struct ieee80211_chan_req *tmp) { const struct ieee80211_chan_req *ret; struct ieee80211_chan_req tmp2; *tmp = (struct ieee80211_chan_req){ .oper = ctx->conf.def, .ap = ctx->conf.ap, }; ret = ieee80211_chanreq_compatible(tmp, req, &tmp2); if (!ret) return NULL; *tmp = *ret; return tmp; } static const struct ieee80211_chan_req * ieee80211_chanctx_reserved_chanreq(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct ieee80211_chan_req *req, struct ieee80211_chan_req *tmp) { struct ieee80211_link_data *link; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(!req)) return NULL; list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) { req = ieee80211_chanreq_compatible(&link->reserved, req, tmp); if (!req) break; } return req; } static const struct ieee80211_chan_req * ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct ieee80211_chan_req *compat, struct ieee80211_chan_req *tmp) { struct ieee80211_link_data *link; const struct ieee80211_chan_req *comp_def = compat; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) { struct ieee80211_bss_conf *link_conf = link->conf; if (link->reserved_chanctx) continue; comp_def = ieee80211_chanreq_compatible(&link_conf->chanreq, comp_def, tmp); if (!comp_def) break; } return comp_def; } static bool ieee80211_chanctx_can_reserve(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct ieee80211_chan_req *req) { struct ieee80211_chan_req tmp; lockdep_assert_wiphy(local->hw.wiphy); if (!ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp)) return false; if (!ieee80211_chanctx_non_reserved_chandef(local, ctx, req, &tmp)) return false; if (!list_empty(&ctx->reserved_links) && ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp)) return true; return false; } static struct ieee80211_chanctx * ieee80211_find_reservation_chanctx(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode) { struct ieee80211_chanctx *ctx; lockdep_assert_wiphy(local->hw.wiphy); if (mode == IEEE80211_CHANCTX_EXCLUSIVE) return NULL; list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) continue; if (!ieee80211_chanctx_can_reserve(local, ctx, chanreq)) continue; return ctx; } return NULL; } static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta, unsigned int link_id) { enum ieee80211_sta_rx_bandwidth width; struct link_sta_info *link_sta; link_sta = wiphy_dereference(sta->local->hw.wiphy, sta->link[link_id]); /* no effect if this STA has no presence on this link */ if (!link_sta) return NL80211_CHAN_WIDTH_20_NOHT; /* * We assume that TX/RX might be asymmetric (so e.g. VHT operating * mode notification changes what a STA wants to receive, but not * necessarily what it will transmit to us), and therefore use the * capabilities here. Calling it RX bandwidth capability is a bit * wrong though, since capabilities are in fact symmetric. 
 */
	width = ieee80211_sta_cap_rx_bw(link_sta);

	switch (width) {
	case IEEE80211_STA_RX_BW_20:
		if (link_sta->pub->ht_cap.ht_supported)
			return NL80211_CHAN_WIDTH_20;
		else
			return NL80211_CHAN_WIDTH_20_NOHT;
	case IEEE80211_STA_RX_BW_40:
		return NL80211_CHAN_WIDTH_40;
	case IEEE80211_STA_RX_BW_80:
		return NL80211_CHAN_WIDTH_80;
	case IEEE80211_STA_RX_BW_160:
		/*
		 * This applies to both 160 and 80+80. Since we use
		 * the returned value to consider degradation of
		 * ctx->conf.min_def, we have to make sure to take
		 * the bigger one (NL80211_CHAN_WIDTH_160).
		 * Otherwise we might try degrading even when not
		 * needed, as the max required sta_bw returned (80+80)
		 * might be smaller than the configured bw (160).
		 */
		return NL80211_CHAN_WIDTH_160;
	case IEEE80211_STA_RX_BW_320:
		return NL80211_CHAN_WIDTH_320;
	default:
		WARN_ON(1);
		return NL80211_CHAN_WIDTH_20;
	}
}

static enum nl80211_chan_width
ieee80211_get_max_required_bw(struct ieee80211_link_data *link)
{
	struct ieee80211_sub_if_data *sdata = link->sdata;
	unsigned int link_id = link->link_id;
	enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
	struct sta_info *sta;

	lockdep_assert_wiphy(sdata->local->hw.wiphy);

	list_for_each_entry(sta, &sdata->local->sta_list, list) {
		if (sdata != sta->sdata &&
		    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
			continue;

		max_bw = max(max_bw, ieee80211_get_sta_bw(sta, link_id));
	}

	return max_bw;
}
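A toy standalone illustration of the accumulation in ieee80211_get_max_required_bw(): the channel context must be wide enough for the widest-capable station, so each station's capability is folded in with max(). The comparison relies on enum ordering, which is exactly why the 80+80 case above must be reported as NL80211_CHAN_WIDTH_160. The simplified enum here is ours; the real nl80211_chan_width values are not monotonically ordered by width:

#include <stdio.h>

/* Simplified width enum in strictly increasing order (unlike the real
 * nl80211_chan_width, where e.g. the 5/10 MHz values sort above 160). */
enum toy_width { W20, W40, W80, W160, W320 };

static const int mhz[] = { 20, 40, 80, 160, 320 };

int main(void)
{
	enum toy_width sta_bw[] = { W20, W160, W40 };	/* per-station caps */
	enum toy_width max_bw = W20;
	unsigned int i;

	for (i = 0; i < sizeof(sta_bw) / sizeof(sta_bw[0]); i++)
		if (sta_bw[i] > max_bw)
			max_bw = sta_bw[i];

	printf("context must offer at least %d MHz\n", mhz[max_bw]);
	return 0;
}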
static enum nl80211_chan_width
ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
				      struct ieee80211_chanctx *ctx,
				      struct ieee80211_link_data *rsvd_for,
				      bool check_reserved)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_link_data *link;
	enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;

	if (WARN_ON(check_reserved && rsvd_for))
		return ctx->conf.def.width;

	for_each_sdata_link(local, link) {
		enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;

		if (check_reserved) {
			if (link->reserved_chanctx != ctx)
				continue;
		} else if (link != rsvd_for &&
			   rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
			continue;

		switch (link->sdata->vif.type) {
		case NL80211_IFTYPE_STATION:
			if (!link->sdata->vif.cfg.assoc) {
				/*
				 * The AP's sta->bandwidth may not yet be set
				 * at this point (pre-association), so simply
				 * take the width from the chandef. We cannot
				 * have TDLS peers yet (only after association).
				 */
				width = link->conf->chanreq.oper.width;
				break;
			}
			/*
			 * otherwise just use min_def like in AP, depending on what
			 * we currently think the AP STA (and possibly TDLS peers)
			 * require(s)
			 */
			fallthrough;
		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_AP_VLAN:
			width = ieee80211_get_max_required_bw(link);
			break;
		case NL80211_IFTYPE_P2P_DEVICE:
		case NL80211_IFTYPE_NAN:
			continue;
		case NL80211_IFTYPE_MONITOR:
			WARN_ON_ONCE(!ieee80211_hw_check(&local->hw,
							 NO_VIRTUAL_MONITOR));
			fallthrough;
		case NL80211_IFTYPE_ADHOC:
		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_OCB:
			width = link->conf->chanreq.oper.width;
			break;
		case NL80211_IFTYPE_WDS:
		case NL80211_IFTYPE_UNSPECIFIED:
		case NUM_NL80211_IFTYPES:
		case NL80211_IFTYPE_P2P_CLIENT:
		case NL80211_IFTYPE_P2P_GO:
			WARN_ON_ONCE(1);
		}

		max_bw = max(max_bw, width);
	}

	/* use the configured bandwidth in case of monitor interface */
	sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
	if (sdata &&
	    rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf)
		max_bw = max(max_bw, ctx->conf.def.width);

	return max_bw;
}

/*
 * recalc the min required chan width of the channel context, which is
 * the max of min required widths of all the interfaces bound to this
 * channel context.
 */
static u32
_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
				  struct ieee80211_chanctx *ctx,
				  struct ieee80211_link_data *rsvd_for,
				  bool check_reserved)
{
	enum nl80211_chan_width max_bw;
	struct cfg80211_chan_def min_def;

	lockdep_assert_wiphy(local->hw.wiphy);

	/* don't optimize non-20MHz based and radar_enabled confs */
	if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 ||
	    ctx->conf.def.width == NL80211_CHAN_WIDTH_10 ||
	    ctx->conf.def.width == NL80211_CHAN_WIDTH_1 ||
	    ctx->conf.def.width == NL80211_CHAN_WIDTH_2 ||
	    ctx->conf.def.width == NL80211_CHAN_WIDTH_4 ||
	    ctx->conf.def.width == NL80211_CHAN_WIDTH_8 ||
	    ctx->conf.def.width == NL80211_CHAN_WIDTH_16 ||
	    ctx->conf.radar_enabled) {
		ctx->conf.min_def = ctx->conf.def;
		return 0;
	}

	max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for,
						       check_reserved);

	/* downgrade chandef up to max_bw */
	min_def = ctx->conf.def;
	while (min_def.width > max_bw)
		ieee80211_chandef_downgrade(&min_def, NULL);

	if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
		return 0;

	ctx->conf.min_def = min_def;
	if (!ctx->driver_present)
		return 0;

	return IEEE80211_CHANCTX_CHANGE_MIN_DEF;
}
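And a toy model of the downgrade loop in _ieee80211_recalc_chanctx_min_def(): starting from the configured width, step down one notch at a time until the minimum definition no longer exceeds what any bound interface requires. The real ieee80211_chandef_downgrade() also recomputes the center frequency and handles 80+80; this sketch tracks only MHz:

#include <stdio.h>

/* Simplified stand-in for ieee80211_chandef_downgrade(): halve the width
 * one step at a time (320 -> 160 -> 80 -> 40 -> 20). */
static int downgrade(int width_mhz)
{
	return width_mhz > 20 ? width_mhz / 2 : 20;
}

int main(void)
{
	int conf_mhz = 160;		/* configured context width */
	int max_required_mhz = 40;	/* widest width any user needs */

	while (conf_mhz > max_required_mhz)
		conf_mhz = downgrade(conf_mhz);

	printf("min_def width: %d MHz\n", conf_mhz);	/* prints 40 */
	return 0;
}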
static void ieee80211_chan_bw_change(struct ieee80211_local *local,
				     struct ieee80211_chanctx *ctx,
				     bool reserved, bool narrowed)
{
	struct sta_info *sta;
	struct ieee80211_supported_band *sband =
		local->hw.wiphy->bands[ctx->conf.def.chan->band];

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		struct ieee80211_sub_if_data *sdata = sta->sdata;
		enum ieee80211_sta_rx_bandwidth new_sta_bw;
		unsigned int link_id;

		if (!ieee80211_sdata_running(sta->sdata))
			continue;

		for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) {
			struct ieee80211_link_data *link =
				rcu_dereference(sdata->link[link_id]);
			struct ieee80211_bss_conf *link_conf;
			struct cfg80211_chan_def *new_chandef;
			struct link_sta_info *link_sta;

			if (!link)
				continue;

			link_conf = link->conf;

			if (rcu_access_pointer(link_conf->chanctx_conf) != &ctx->conf)
				continue;

			link_sta = rcu_dereference(sta->link[link_id]);
			if (!link_sta)
				continue;

			if (reserved)
				new_chandef = &link->reserved.oper;
			else
				new_chandef = &link_conf->chanreq.oper;

			new_sta_bw = _ieee80211_sta_cur_vht_bw(link_sta,
							       new_chandef);

			/* nothing change */
			if (new_sta_bw == link_sta->pub->bandwidth)
				continue;

			/* vif changed to narrow BW and narrow BW for station
			 * wasn't requested or vice versa */
			if ((new_sta_bw < link_sta->pub->bandwidth) == !narrowed)
				continue;

			link_sta->pub->bandwidth = new_sta_bw;
			rate_control_rate_update(local, sband, link_sta,
						 IEEE80211_RC_BW_CHANGED);
		}
	}
	rcu_read_unlock();
}

/*
 * recalc the min required chan width of the channel context, which is
 * the max of min required widths of all the interfaces bound to this
 * channel context.
 */
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
				      struct ieee80211_chanctx *ctx,
				      struct ieee80211_link_data *rsvd_for,
				      bool check_reserved)
{
	u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for,
							check_reserved);

	if (!changed)
		return;

	/* check if BW was narrowed */
	ieee80211_chan_bw_change(local, ctx, false, true);

	drv_change_chanctx(local, ctx, changed);

	/* check if BW was widened */
	ieee80211_chan_bw_change(local, ctx, false, false);
}

static void _ieee80211_change_chanctx(struct ieee80211_local *local,
				      struct ieee80211_chanctx *ctx,
				      struct ieee80211_chanctx *old_ctx,
				      const struct ieee80211_chan_req *chanreq,
				      struct ieee80211_link_data *rsvd_for)
{
	const struct cfg80211_chan_def *chandef = &chanreq->oper;
	struct ieee80211_chan_req ctx_req = {
		.oper = ctx->conf.def,
		.ap = ctx->conf.ap,
	};
	u32 changed = 0;

	/* expected to handle only 20/40/80/160/320 channel widths */
	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	case NL80211_CHAN_WIDTH_40:
	case NL80211_CHAN_WIDTH_80:
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
	case NL80211_CHAN_WIDTH_320:
		break;
	default:
		WARN_ON(1);
	}

	/*
	 * Check whether BW was narrowed - we do this _before_ calling
	 * recalc_chanctx_min_def, since that may not report a change, e.g.
	 * in case a new context was added for the first time with all
	 * parameters up to date.
*/ ieee80211_chan_bw_change(local, old_ctx, false, true); if (ieee80211_chanreq_identical(&ctx_req, chanreq)) { ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false); return; } WARN_ON(ieee80211_chanctx_refcount(local, ctx) > 1 && !cfg80211_chandef_compatible(&ctx->conf.def, &chanreq->oper)); ieee80211_remove_wbrf(local, &ctx->conf.def); if (!cfg80211_chandef_identical(&ctx->conf.def, &chanreq->oper)) { if (ctx->conf.def.width != chanreq->oper.width) changed |= IEEE80211_CHANCTX_CHANGE_WIDTH; if (ctx->conf.def.punctured != chanreq->oper.punctured) changed |= IEEE80211_CHANCTX_CHANGE_PUNCTURING; } if (!cfg80211_chandef_identical(&ctx->conf.ap, &chanreq->ap)) changed |= IEEE80211_CHANCTX_CHANGE_AP; ctx->conf.def = *chandef; ctx->conf.ap = chanreq->ap; /* check if min chanctx also changed */ changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false); ieee80211_add_wbrf(local, &ctx->conf.def); drv_change_chanctx(local, ctx, changed); /* check if BW is wider */ ieee80211_chan_bw_change(local, old_ctx, false, false); } static void ieee80211_change_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, struct ieee80211_chanctx *old_ctx, const struct ieee80211_chan_req *chanreq) { _ieee80211_change_chanctx(local, ctx, old_ctx, chanreq, NULL); } /* Note: if successful, the returned chanctx is reserved for the link */ static struct ieee80211_chanctx * ieee80211_find_chanctx(struct ieee80211_local *local, struct ieee80211_link_data *link, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode) { struct ieee80211_chan_req tmp; struct ieee80211_chanctx *ctx; lockdep_assert_wiphy(local->hw.wiphy); if (mode == IEEE80211_CHANCTX_EXCLUSIVE) return NULL; if (WARN_ON(link->reserved_chanctx)) return NULL; list_for_each_entry(ctx, &local->chanctx_list, list) { const struct ieee80211_chan_req *compat; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE) continue; if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) continue; compat = ieee80211_chanctx_compatible(ctx, chanreq, &tmp); if (!compat) continue; compat = ieee80211_chanctx_reserved_chanreq(local, ctx, compat, &tmp); if (!compat) continue; /* * Reserve the chanctx temporarily, as the driver might change * active links during callbacks we make into it below and/or * later during assignment, which could (otherwise) cause the * context to actually be removed. 
*/ link->reserved_chanctx = ctx; list_add(&link->reserved_chanctx_list, &ctx->reserved_links); ieee80211_change_chanctx(local, ctx, ctx, compat); return ctx; } return NULL; } bool ieee80211_is_radar_required(struct ieee80211_local *local, struct cfg80211_scan_request *req) { struct wiphy *wiphy = local->hw.wiphy; struct ieee80211_link_data *link; struct ieee80211_channel *chan; int radio_idx; lockdep_assert_wiphy(local->hw.wiphy); if (!req) return false; for_each_sdata_link(local, link) { if (link->radar_required) { chan = link->conf->chanreq.oper.chan; radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan); if (ieee80211_is_radio_idx_in_scan_req(wiphy, req, radio_idx)) return true; } } return false; } static bool ieee80211_chanctx_radar_required(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { struct ieee80211_chanctx_conf *conf = &ctx->conf; struct ieee80211_link_data *link; lockdep_assert_wiphy(local->hw.wiphy); for_each_sdata_link(local, link) { if (rcu_access_pointer(link->conf->chanctx_conf) != conf) continue; if (!link->radar_required) continue; return true; } return false; } static struct ieee80211_chanctx * ieee80211_alloc_chanctx(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, int radio_idx) { struct ieee80211_chanctx *ctx; lockdep_assert_wiphy(local->hw.wiphy); ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL); if (!ctx) return NULL; INIT_LIST_HEAD(&ctx->assigned_links); INIT_LIST_HEAD(&ctx->reserved_links); ctx->conf.def = chanreq->oper; ctx->conf.ap = chanreq->ap; ctx->conf.rx_chains_static = 1; ctx->conf.rx_chains_dynamic = 1; ctx->mode = mode; ctx->conf.radar_enabled = false; ctx->conf.radio_idx = radio_idx; ctx->radar_detected = false; _ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); return ctx; } static int ieee80211_add_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { u32 changed; int err; lockdep_assert_wiphy(local->hw.wiphy); ieee80211_add_wbrf(local, &ctx->conf.def); /* turn idle off *before* setting channel -- some drivers need that */ changed = ieee80211_idle_off(local); if (changed) ieee80211_hw_config(local, -1, changed); err = drv_add_chanctx(local, ctx); if (err) { ieee80211_recalc_idle(local); return err; } return 0; } static struct ieee80211_chanctx * ieee80211_new_chanctx(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, bool assign_on_failure, int radio_idx) { struct ieee80211_chanctx *ctx; int err; lockdep_assert_wiphy(local->hw.wiphy); ctx = ieee80211_alloc_chanctx(local, chanreq, mode, radio_idx); if (!ctx) return ERR_PTR(-ENOMEM); err = ieee80211_add_chanctx(local, ctx); if (!assign_on_failure && err) { kfree(ctx); return ERR_PTR(err); } /* We ignored a driver error, see _ieee80211_set_active_links */ WARN_ON_ONCE(err && !local->in_reconfig); list_add_rcu(&ctx->list, &local->chanctx_list); return ctx; } static void ieee80211_del_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, bool skip_idle_recalc) { lockdep_assert_wiphy(local->hw.wiphy); drv_remove_chanctx(local, ctx); if (!skip_idle_recalc) ieee80211_recalc_idle(local); ieee80211_remove_wbrf(local, &ctx->conf.def); } static void ieee80211_free_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, bool skip_idle_recalc) { lockdep_assert_wiphy(local->hw.wiphy); WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0); list_del_rcu(&ctx->list); ieee80211_del_chanctx(local, ctx, 
skip_idle_recalc); kfree_rcu(ctx, rcu_head); } void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { struct ieee80211_chanctx_conf *conf = &ctx->conf; const struct ieee80211_chan_req *compat = NULL; struct ieee80211_link_data *link; struct ieee80211_chan_req tmp; struct sta_info *sta; lockdep_assert_wiphy(local->hw.wiphy); for_each_sdata_link(local, link) { struct ieee80211_bss_conf *link_conf; if (link->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; link_conf = link->conf; if (rcu_access_pointer(link_conf->chanctx_conf) != conf) continue; if (!compat) compat = &link_conf->chanreq; compat = ieee80211_chanreq_compatible(&link_conf->chanreq, compat, &tmp); if (WARN_ON_ONCE(!compat)) return; } if (WARN_ON_ONCE(!compat)) return; /* TDLS peers can sometimes affect the chandef width */ list_for_each_entry(sta, &local->sta_list, list) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_chan_req tdls_chanreq = {}; int tdls_link_id; if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) || !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->tdls_chandef.chan) continue; tdls_link_id = ieee80211_tdls_sta_link_id(sta); link = sdata_dereference(sdata->link[tdls_link_id], sdata); if (!link) continue; if (rcu_access_pointer(link->conf->chanctx_conf) != conf) continue; tdls_chanreq.oper = sta->tdls_chandef; /* note this always fills and returns &tmp if compat */ compat = ieee80211_chanreq_compatible(&tdls_chanreq, compat, &tmp); if (WARN_ON_ONCE(!compat)) return; } ieee80211_change_chanctx(local, ctx, ctx, compat); } static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx) { bool radar_enabled; lockdep_assert_wiphy(local->hw.wiphy); radar_enabled = ieee80211_chanctx_radar_required(local, chanctx); if (radar_enabled == chanctx->conf.radar_enabled) return; chanctx->conf.radar_enabled = radar_enabled; drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR); } static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link, struct ieee80211_chanctx *new_ctx, bool assign_on_failure) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *curr_ctx = NULL; bool new_idle; int ret; if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN)) return -EOPNOTSUPP; conf = rcu_dereference_protected(link->conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); if (conf && !local->in_reconfig) { curr_ctx = container_of(conf, struct ieee80211_chanctx, conf); drv_unassign_vif_chanctx(local, sdata, link->conf, curr_ctx); conf = NULL; list_del(&link->assigned_chanctx_list); } if (new_ctx) { /* recalc considering the link we'll use it for now */ ieee80211_recalc_chanctx_min_def(local, new_ctx, link, false); ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx); if (assign_on_failure || !ret) { /* Need to continue, see _ieee80211_set_active_links */ WARN_ON_ONCE(ret && !local->in_reconfig); ret = 0; /* succeeded, so commit it to the data structures */ conf = &new_ctx->conf; if (!local->in_reconfig) list_add(&link->assigned_chanctx_list, &new_ctx->assigned_links); } } else { ret = 0; } rcu_assign_pointer(link->conf->chanctx_conf, conf); if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) { ieee80211_recalc_chanctx_chantype(local, curr_ctx); ieee80211_recalc_smps_chanctx(local, curr_ctx); ieee80211_recalc_radar_chanctx(local, curr_ctx); 
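		/* also recompute the old context's minimum chandef now that
		 * this link is no longer assigned to it */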
ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL, false);
	}

	if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
		ieee80211_recalc_txpower(link, false);
		ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false);
	}

	if (conf) {
		new_idle = false;
	} else {
		struct ieee80211_link_data *tmp;

		new_idle = true;
		for_each_sdata_link(local, tmp) {
			if (rcu_access_pointer(tmp->conf->chanctx_conf)) {
				new_idle = false;
				break;
			}
		}
	}

	if (new_idle != sdata->vif.cfg.idle) {
		sdata->vif.cfg.idle = new_idle;

		if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
		    sdata->vif.type != NL80211_IFTYPE_MONITOR)
			ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
	}

	ieee80211_check_fast_xmit_iface(sdata);

	return ret;
}

void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
				   struct ieee80211_chanctx *chanctx)
{
	struct ieee80211_sub_if_data *sdata;
	u8 rx_chains_static, rx_chains_dynamic;
	struct ieee80211_link_data *link;

	lockdep_assert_wiphy(local->hw.wiphy);

	rx_chains_static = 1;
	rx_chains_dynamic = 1;

	for_each_sdata_link(local, link) {
		u8 needed_static, needed_dynamic;

		switch (link->sdata->vif.type) {
		case NL80211_IFTYPE_STATION:
			if (!link->sdata->u.mgd.associated)
				continue;
			break;
		case NL80211_IFTYPE_MONITOR:
			if (!ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
				continue;
			break;
		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_ADHOC:
		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_OCB:
			break;
		default:
			continue;
		}

		if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
			continue;

		if (link->sdata->vif.type == NL80211_IFTYPE_MONITOR) {
			rx_chains_dynamic = rx_chains_static = local->rx_chains;
			break;
		}

		switch (link->smps_mode) {
		default:
			WARN_ONCE(1, "Invalid SMPS mode %d\n",
				  link->smps_mode);
			fallthrough;
		case IEEE80211_SMPS_OFF:
			needed_static = link->needed_rx_chains;
			needed_dynamic = link->needed_rx_chains;
			break;
		case IEEE80211_SMPS_DYNAMIC:
			needed_static = 1;
			needed_dynamic = link->needed_rx_chains;
			break;
		case IEEE80211_SMPS_STATIC:
			needed_static = 1;
			needed_dynamic = 1;
			break;
		}

		rx_chains_static = max(rx_chains_static, needed_static);
		rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
	}

	/* Disable SMPS for the monitor interface */
	sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
	if (sdata &&
	    rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &chanctx->conf)
		rx_chains_dynamic = rx_chains_static = local->rx_chains;

	if (rx_chains_static == chanctx->conf.rx_chains_static &&
	    rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
		return;

	chanctx->conf.rx_chains_static = rx_chains_static;
	chanctx->conf.rx_chains_dynamic = rx_chains_dynamic;
	drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS);
}

static void
__ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
				       bool clear)
{
	struct ieee80211_sub_if_data *sdata = link->sdata;
	unsigned int link_id = link->link_id;
	struct ieee80211_bss_conf *link_conf = link->conf;
	struct ieee80211_local *local __maybe_unused = sdata->local;
	struct ieee80211_sub_if_data *vlan;
	struct ieee80211_chanctx_conf *conf;

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
		return;

	lockdep_assert_wiphy(local->hw.wiphy);

	/* Check that conf exists, even when clearing; this function
	 * must be called with the AP's channel context still there,
	 * as it would otherwise cause VLANs to have an invalid
	 * channel context pointer for a while, possibly pointing
	 * to a channel context that has already been freed.
*/ conf = rcu_dereference_protected(link_conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); WARN_ON(!conf); if (clear) conf = NULL; list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { struct ieee80211_bss_conf *vlan_conf; vlan_conf = wiphy_dereference(local->hw.wiphy, vlan->vif.link_conf[link_id]); if (WARN_ON(!vlan_conf)) continue; rcu_assign_pointer(vlan_conf->chanctx_conf, conf); } } void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link, bool clear) { struct ieee80211_local *local = link->sdata->local; lockdep_assert_wiphy(local->hw.wiphy); __ieee80211_link_copy_chanctx_to_vlans(link, clear); } void ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_chanctx *ctx = link->reserved_chanctx; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (WARN_ON(!ctx)) return; list_del(&link->reserved_chanctx_list); link->reserved_chanctx = NULL; if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0) { if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) { if (WARN_ON(!ctx->replace_ctx)) return; WARN_ON(ctx->replace_ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED); WARN_ON(ctx->replace_ctx->replace_ctx != ctx); ctx->replace_ctx->replace_ctx = NULL; ctx->replace_ctx->replace_state = IEEE80211_CHANCTX_REPLACE_NONE; list_del_rcu(&ctx->list); kfree_rcu(ctx, rcu_head); } else { ieee80211_free_chanctx(sdata->local, ctx, false); } } } static struct ieee80211_chanctx * ieee80211_replace_chanctx(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, struct ieee80211_chanctx *curr_ctx) { struct ieee80211_chanctx *new_ctx, *ctx; struct wiphy *wiphy = local->hw.wiphy; const struct wiphy_radio *radio; if (!curr_ctx || (curr_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) || !list_empty(&curr_ctx->reserved_links)) { /* * Another link already requested this context for a * reservation. Find another one hoping all links assigned * to it will also switch soon enough. * * TODO: This needs a little more work as some cases * (more than 2 chanctx capable devices) may fail which could * otherwise succeed provided some channel context juggling was * performed. * * Consider ctx1..3, link1..6, each ctx has 2 links. link1 and * link2 from ctx1 request new different chandefs starting 2 * in-place reservations with ctx4 and ctx5 replacing ctx1 and * ctx2 respectively. Next link5 and link6 from ctx3 reserve * ctx4. If link3 and link4 remain on ctx2 as they are then this * fails unless `replace_ctx` from ctx5 is replaced with ctx3. */ list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE) continue; if (!list_empty(&ctx->reserved_links)) continue; if (ctx->conf.radio_idx >= 0) { radio = &wiphy->radio[ctx->conf.radio_idx]; if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper)) continue; } curr_ctx = ctx; break; } } /* * If that's true then all available contexts already have reservations * and cannot be used. 
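 * (The check below repeats the entry condition, now applied to the
 * fallback context found above, if any.)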
*/ if (!curr_ctx || (curr_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) || !list_empty(&curr_ctx->reserved_links)) return ERR_PTR(-EBUSY); new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode, -1); if (!new_ctx) return ERR_PTR(-ENOMEM); new_ctx->replace_ctx = curr_ctx; new_ctx->replace_state = IEEE80211_CHANCTX_REPLACES_OTHER; curr_ctx->replace_ctx = new_ctx; curr_ctx->replace_state = IEEE80211_CHANCTX_WILL_BE_REPLACED; list_add_rcu(&new_ctx->list, &local->chanctx_list); return new_ctx; } static bool ieee80211_find_available_radio(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, u32 radio_mask, int *radio_idx) { struct wiphy *wiphy = local->hw.wiphy; const struct wiphy_radio *radio; int i; *radio_idx = -1; if (!wiphy->n_radio) return true; for (i = 0; i < wiphy->n_radio; i++) { if (!(radio_mask & BIT(i))) continue; radio = &wiphy->radio[i]; if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper)) continue; if (!ieee80211_can_create_new_chanctx(local, i)) continue; *radio_idx = i; return true; } return false; } int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, bool radar_required) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx *new_ctx, *curr_ctx; int radio_idx; lockdep_assert_wiphy(local->hw.wiphy); curr_ctx = ieee80211_link_get_chanctx(link); if (curr_ctx && !local->ops->switch_vif_chanctx) return -EOPNOTSUPP; new_ctx = ieee80211_find_reservation_chanctx(local, chanreq, mode); if (!new_ctx) { if (ieee80211_can_create_new_chanctx(local, -1) && ieee80211_find_available_radio(local, chanreq, sdata->wdev.radio_mask, &radio_idx)) new_ctx = ieee80211_new_chanctx(local, chanreq, mode, false, radio_idx); else new_ctx = ieee80211_replace_chanctx(local, chanreq, mode, curr_ctx); if (IS_ERR(new_ctx)) return PTR_ERR(new_ctx); } list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links); link->reserved_chanctx = new_ctx; link->reserved = *chanreq; link->reserved_radar_required = radar_required; link->reserved_ready = false; return 0; } static void ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; switch (sdata->vif.type) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_OCB: wiphy_work_queue(sdata->local->hw.wiphy, &link->csa.finalize_work); break; case NL80211_IFTYPE_STATION: wiphy_hrtimer_work_queue(sdata->local->hw.wiphy, &link->u.mgd.csa.switch_work, 0); break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: case NUM_NL80211_IFTYPES: WARN_ON(1); break; } } static void ieee80211_link_update_chanreq(struct ieee80211_link_data *link, const struct ieee80211_chan_req *chanreq) { struct ieee80211_sub_if_data *sdata = link->sdata; unsigned int link_id = link->link_id; struct ieee80211_sub_if_data *vlan; link->conf->chanreq = *chanreq; if (sdata->vif.type != NL80211_IFTYPE_AP) return; list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { struct ieee80211_bss_conf *vlan_conf; vlan_conf = wiphy_dereference(sdata->local->hw.wiphy, vlan->vif.link_conf[link_id]); if (WARN_ON(!vlan_conf)) continue; vlan_conf->chanreq = *chanreq; } } static int 
ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_bss_conf *link_conf = link->conf; struct ieee80211_local *local = sdata->local; struct ieee80211_vif_chanctx_switch vif_chsw[1] = {}; struct ieee80211_chanctx *old_ctx, *new_ctx; const struct ieee80211_chan_req *chanreq; struct ieee80211_chan_req tmp; u64 changed = 0; int err; lockdep_assert_wiphy(local->hw.wiphy); new_ctx = link->reserved_chanctx; old_ctx = ieee80211_link_get_chanctx(link); if (WARN_ON(!link->reserved_ready)) return -EBUSY; if (WARN_ON(!new_ctx)) return -EINVAL; if (WARN_ON(!old_ctx)) return -EINVAL; if (WARN_ON(new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)) return -EINVAL; chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx, &link->reserved, &tmp); if (WARN_ON(!chanreq)) return -EINVAL; if (link_conf->chanreq.oper.width != link->reserved.oper.width) changed = BSS_CHANGED_BANDWIDTH; ieee80211_link_update_chanreq(link, &link->reserved); _ieee80211_change_chanctx(local, new_ctx, old_ctx, chanreq, link); vif_chsw[0].vif = &sdata->vif; vif_chsw[0].old_ctx = &old_ctx->conf; vif_chsw[0].new_ctx = &new_ctx->conf; vif_chsw[0].link_conf = link->conf; list_del(&link->reserved_chanctx_list); link->reserved_chanctx = NULL; err = drv_switch_vif_chanctx(local, vif_chsw, 1, CHANCTX_SWMODE_REASSIGN_VIF); if (err) { if (ieee80211_chanctx_refcount(local, new_ctx) == 0) ieee80211_free_chanctx(local, new_ctx, false); goto out; } link->radar_required = link->reserved_radar_required; list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links); rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf); if (sdata->vif.type == NL80211_IFTYPE_AP) __ieee80211_link_copy_chanctx_to_vlans(link, false); ieee80211_check_fast_xmit_iface(sdata); if (ieee80211_chanctx_refcount(local, old_ctx) == 0) ieee80211_free_chanctx(local, old_ctx, false); ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false); ieee80211_recalc_smps_chanctx(local, new_ctx); ieee80211_recalc_radar_chanctx(local, new_ctx); if (changed) ieee80211_link_info_change_notify(sdata, link, changed); out: ieee80211_link_chanctx_reservation_complete(link); return err; } static int ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx *old_ctx, *new_ctx; const struct ieee80211_chan_req *chanreq; struct ieee80211_chan_req tmp; int err; old_ctx = ieee80211_link_get_chanctx(link); new_ctx = link->reserved_chanctx; if (WARN_ON(!link->reserved_ready)) return -EINVAL; if (WARN_ON(old_ctx)) return -EINVAL; if (WARN_ON(!new_ctx)) return -EINVAL; if (WARN_ON(new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)) return -EINVAL; chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx, &link->reserved, &tmp); if (WARN_ON(!chanreq)) return -EINVAL; ieee80211_change_chanctx(local, new_ctx, new_ctx, chanreq); list_del(&link->reserved_chanctx_list); link->reserved_chanctx = NULL; err = ieee80211_assign_link_chanctx(link, new_ctx, false); if (err) { if (ieee80211_chanctx_refcount(local, new_ctx) == 0) ieee80211_free_chanctx(local, new_ctx, false); goto out; } out: ieee80211_link_chanctx_reservation_complete(link); return err; } static bool ieee80211_link_has_in_place_reservation(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_chanctx *old_ctx, *new_ctx; 
lockdep_assert_wiphy(sdata->local->hw.wiphy); new_ctx = link->reserved_chanctx; old_ctx = ieee80211_link_get_chanctx(link); if (!old_ctx) return false; if (WARN_ON(!new_ctx)) return false; if (old_ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED) return false; if (new_ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) return false; return true; } static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local, int n_vifs) { struct ieee80211_vif_chanctx_switch *vif_chsw; struct ieee80211_link_data *link; struct ieee80211_chanctx *ctx, *old_ctx; int i, err; lockdep_assert_wiphy(local->hw.wiphy); vif_chsw = kcalloc(n_vifs, sizeof(vif_chsw[0]), GFP_KERNEL); if (!vif_chsw) return -ENOMEM; i = 0; list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (WARN_ON(!ctx->replace_ctx)) { err = -EINVAL; goto out; } list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) { if (!ieee80211_link_has_in_place_reservation(link)) continue; old_ctx = ieee80211_link_get_chanctx(link); vif_chsw[i].vif = &link->sdata->vif; vif_chsw[i].old_ctx = &old_ctx->conf; vif_chsw[i].new_ctx = &ctx->conf; vif_chsw[i].link_conf = link->conf; i++; } } err = drv_switch_vif_chanctx(local, vif_chsw, n_vifs, CHANCTX_SWMODE_SWAP_CONTEXTS); out: kfree(vif_chsw); return err; } static int ieee80211_chsw_switch_ctxs(struct ieee80211_local *local) { struct ieee80211_chanctx *ctx; int err; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (!list_empty(&ctx->replace_ctx->assigned_links)) continue; ieee80211_del_chanctx(local, ctx->replace_ctx, false); err = ieee80211_add_chanctx(local, ctx); if (err) goto err; } return 0; err: WARN_ON(ieee80211_add_chanctx(local, ctx)); list_for_each_entry_continue_reverse(ctx, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (!list_empty(&ctx->replace_ctx->assigned_links)) continue; ieee80211_del_chanctx(local, ctx, false); WARN_ON(ieee80211_add_chanctx(local, ctx->replace_ctx)); } return err; } static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) { struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx; int err, n_assigned, n_reserved, n_ready; int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0; lockdep_assert_wiphy(local->hw.wiphy); /* * If there are 2 independent pairs of channel contexts performing * cross-switch of their vifs this code will still wait until both are * ready even though it could be possible to switch one before the * other is ready. * * For practical reasons and code simplicity just do a single huge * switch. */ /* * Verify if the reservation is still feasible. 
* - if it's not then disconnect * - if it is but not all vifs necessary are ready then defer */ list_for_each_entry(ctx, &local->chanctx_list, list) { struct ieee80211_link_data *link; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (WARN_ON(!ctx->replace_ctx)) { err = -EINVAL; goto err; } n_ctx++; n_assigned = 0; n_reserved = 0; n_ready = 0; list_for_each_entry(link, &ctx->replace_ctx->assigned_links, assigned_chanctx_list) { n_assigned++; if (link->reserved_chanctx) { n_reserved++; if (link->reserved_ready) n_ready++; } } if (n_assigned != n_reserved) { if (n_ready == n_reserved) { wiphy_info(local->hw.wiphy, "channel context reservation cannot be finalized because some interfaces aren't switching\n"); err = -EBUSY; goto err; } return -EAGAIN; } ctx->conf.radar_enabled = false; list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) { if (ieee80211_link_has_in_place_reservation(link) && !link->reserved_ready) return -EAGAIN; old_ctx = ieee80211_link_get_chanctx(link); if (old_ctx) { if (old_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) n_vifs_switch++; else n_vifs_assign++; } else { n_vifs_ctxless++; } if (link->reserved_radar_required) ctx->conf.radar_enabled = true; } } if (WARN_ON(n_ctx == 0) || WARN_ON(n_vifs_switch == 0 && n_vifs_assign == 0 && n_vifs_ctxless == 0)) { err = -EINVAL; goto err; } /* update station rate control and min width before switch */ list_for_each_entry(ctx, &local->chanctx_list, list) { struct ieee80211_link_data *link; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (WARN_ON(!ctx->replace_ctx)) { err = -EINVAL; goto err; } list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) { if (!ieee80211_link_has_in_place_reservation(link)) continue; ieee80211_chan_bw_change(local, ieee80211_link_get_chanctx(link), true, true); } ieee80211_recalc_chanctx_min_def(local, ctx, NULL, true); } /* * All necessary vifs are ready. Perform the switch now depending on * reservations and driver capabilities. */ if (n_vifs_switch > 0) { err = ieee80211_chsw_switch_vifs(local, n_vifs_switch); if (err) goto err; } if (n_vifs_assign > 0 || n_vifs_ctxless > 0) { err = ieee80211_chsw_switch_ctxs(local); if (err) goto err; } /* * Update all structures, values and pointers to point to new channel * context(s). 
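	 * (The driver side has already been switched above, so from here
	 * on this is only mac80211 bookkeeping.)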
*/ list_for_each_entry(ctx, &local->chanctx_list, list) { struct ieee80211_link_data *link, *link_tmp; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; if (WARN_ON(!ctx->replace_ctx)) { err = -EINVAL; goto err; } list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_bss_conf *link_conf = link->conf; u64 changed = 0; if (!ieee80211_link_has_in_place_reservation(link)) continue; rcu_assign_pointer(link_conf->chanctx_conf, &ctx->conf); if (sdata->vif.type == NL80211_IFTYPE_AP) __ieee80211_link_copy_chanctx_to_vlans(link, false); ieee80211_check_fast_xmit_iface(sdata); link->radar_required = link->reserved_radar_required; if (link_conf->chanreq.oper.width != link->reserved.oper.width) changed = BSS_CHANGED_BANDWIDTH; ieee80211_link_update_chanreq(link, &link->reserved); if (changed) ieee80211_link_info_change_notify(sdata, link, changed); ieee80211_recalc_txpower(link, false); } ieee80211_recalc_chanctx_chantype(local, ctx); ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, reserved_chanctx_list) { if (ieee80211_link_get_chanctx(link) != ctx) continue; list_del(&link->reserved_chanctx_list); list_move(&link->assigned_chanctx_list, &ctx->assigned_links); link->reserved_chanctx = NULL; ieee80211_link_chanctx_reservation_complete(link); ieee80211_chan_bw_change(local, ctx, false, false); } /* * This context might have been a dependency for an already * ready re-assign reservation interface that was deferred. Do * not propagate error to the caller though. The in-place * reservation for originally requested interface has already * succeeded at this point. 
*/ list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, reserved_chanctx_list) { if (WARN_ON(ieee80211_link_has_in_place_reservation(link))) continue; if (WARN_ON(link->reserved_chanctx != ctx)) continue; if (!link->reserved_ready) continue; if (ieee80211_link_get_chanctx(link)) err = ieee80211_link_use_reserved_reassign(link); else err = ieee80211_link_use_reserved_assign(link); if (err) { link_info(link, "failed to finalize (re-)assign reservation (err=%d)\n", err); ieee80211_link_unreserve_chanctx(link); cfg80211_stop_iface(local->hw.wiphy, &link->sdata->wdev, GFP_KERNEL); } } } /* * Finally free old contexts */ list_for_each_entry_safe(ctx, ctx_tmp, &local->chanctx_list, list) { if (ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; ctx->replace_ctx->replace_ctx = NULL; ctx->replace_ctx->replace_state = IEEE80211_CHANCTX_REPLACE_NONE; list_del_rcu(&ctx->list); kfree_rcu(ctx, rcu_head); } return 0; err: list_for_each_entry(ctx, &local->chanctx_list, list) { struct ieee80211_link_data *link, *link_tmp; if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) continue; list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, reserved_chanctx_list) { ieee80211_link_unreserve_chanctx(link); ieee80211_link_chanctx_reservation_complete(link); } } return err; } void __ieee80211_link_release_channel(struct ieee80211_link_data *link, bool skip_idle_recalc) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_bss_conf *link_conf = link->conf; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; bool use_reserved_switch = false; lockdep_assert_wiphy(local->hw.wiphy); conf = rcu_dereference_protected(link_conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); if (!conf) return; ctx = container_of(conf, struct ieee80211_chanctx, conf); if (link->reserved_chanctx) { if (link->reserved_chanctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER && ieee80211_chanctx_num_reserved(local, link->reserved_chanctx) > 1) use_reserved_switch = true; ieee80211_link_unreserve_chanctx(link); } ieee80211_assign_link_chanctx(link, NULL, false); if (ieee80211_chanctx_refcount(local, ctx) == 0) ieee80211_free_chanctx(local, ctx, skip_idle_recalc); link->radar_required = false; /* Unreserving may ready an in-place reservation. 
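	 * If so, finalize it now by performing the reserved switch.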
*/ if (use_reserved_switch) ieee80211_vif_use_reserved_switch(local); } int _ieee80211_link_use_channel(struct ieee80211_link_data *link, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, bool assign_on_failure) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx *ctx; u8 radar_detect_width = 0; bool reserved = false; int radio_idx; int ret; lockdep_assert_wiphy(local->hw.wiphy); if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) { ieee80211_link_update_chanreq(link, chanreq); return 0; } ret = cfg80211_chandef_dfs_required(local->hw.wiphy, &chanreq->oper, sdata->wdev.iftype); if (ret < 0) goto out; if (ret > 0) radar_detect_width = BIT(chanreq->oper.width); link->radar_required = ret; ret = ieee80211_check_combinations(sdata, &chanreq->oper, mode, radar_detect_width, -1); if (ret < 0) goto out; if (!local->in_reconfig) __ieee80211_link_release_channel(link, false); ctx = ieee80211_find_chanctx(local, link, chanreq, mode); /* Note: context is now reserved */ if (ctx) reserved = true; else if (!ieee80211_find_available_radio(local, chanreq, sdata->wdev.radio_mask, &radio_idx)) ctx = ERR_PTR(-EBUSY); else ctx = ieee80211_new_chanctx(local, chanreq, mode, assign_on_failure, radio_idx); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto out; } ieee80211_link_update_chanreq(link, chanreq); ret = ieee80211_assign_link_chanctx(link, ctx, assign_on_failure); if (reserved) { /* remove reservation */ WARN_ON(link->reserved_chanctx != ctx); link->reserved_chanctx = NULL; list_del(&link->reserved_chanctx_list); } if (ret) { /* if assign fails refcount stays the same */ if (ieee80211_chanctx_refcount(local, ctx) == 0) ieee80211_free_chanctx(local, ctx, false); goto out; } ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); out: if (ret) link->radar_required = false; return ret; } int ieee80211_link_use_reserved_context(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx *new_ctx; struct ieee80211_chanctx *old_ctx; int err; lockdep_assert_wiphy(local->hw.wiphy); new_ctx = link->reserved_chanctx; old_ctx = ieee80211_link_get_chanctx(link); if (WARN_ON(!new_ctx)) return -EINVAL; if (WARN_ON(new_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)) return -EINVAL; if (WARN_ON(link->reserved_ready)) return -EINVAL; link->reserved_ready = true; if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) { if (old_ctx) return ieee80211_link_use_reserved_reassign(link); return ieee80211_link_use_reserved_assign(link); } /* * In-place reservation may need to be finalized now either if: * a) sdata is taking part in the swapping itself and is the last one * b) sdata has switched with a re-assign reservation to an existing * context readying in-place switching of old_ctx * * In case of (b) do not propagate the error up because the requested * sdata already switched successfully. Just spill an extra warning. * The ieee80211_vif_use_reserved_switch() already stops all necessary * interfaces upon failure. 
*/ if ((old_ctx && old_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) || new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) { err = ieee80211_vif_use_reserved_switch(local); if (err && err != -EAGAIN) { if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) return err; wiphy_info(local->hw.wiphy, "depending in-place reservation failed (err=%d)\n", err); } } return 0; } /* * This is similar to ieee80211_chanctx_compatible(), but rechecks * against all the links actually using it (except the one that's * passed, since that one is changing). * This is done in order to allow changes to the AP's bandwidth for * wider bandwidth OFDMA purposes, which wouldn't be treated as * compatible by ieee80211_chanctx_recheck() but is OK if the link * requesting the update is the only one using it. */ static const struct ieee80211_chan_req * ieee80211_chanctx_recheck(struct ieee80211_local *local, struct ieee80211_link_data *skip_link, struct ieee80211_chanctx *ctx, const struct ieee80211_chan_req *req, struct ieee80211_chan_req *tmp) { const struct ieee80211_chan_req *ret = req; struct ieee80211_link_data *link; lockdep_assert_wiphy(local->hw.wiphy); for_each_sdata_link(local, link) { if (link == skip_link) continue; if (rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf) { ret = ieee80211_chanreq_compatible(ret, &link->conf->chanreq, tmp); if (!ret) return NULL; } if (link->reserved_chanctx == ctx) { ret = ieee80211_chanreq_compatible(ret, &link->reserved, tmp); if (!ret) return NULL; } } *tmp = *ret; return tmp; } int ieee80211_link_change_chanreq(struct ieee80211_link_data *link, const struct ieee80211_chan_req *chanreq, u64 *changed) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_bss_conf *link_conf = link->conf; struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; const struct ieee80211_chan_req *compat; struct ieee80211_chan_req tmp; lockdep_assert_wiphy(local->hw.wiphy); if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &chanreq->oper, IEEE80211_CHAN_DISABLED)) return -EINVAL; /* for non-HT 20 MHz the rest doesn't matter */ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT && cfg80211_chandef_identical(&chanreq->oper, &link_conf->chanreq.oper)) return 0; /* but you cannot switch to/from it */ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT || link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT) return -EINVAL; conf = rcu_dereference_protected(link_conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); if (!conf) return -EINVAL; ctx = container_of(conf, struct ieee80211_chanctx, conf); compat = ieee80211_chanctx_recheck(local, link, ctx, chanreq, &tmp); if (!compat) return -EINVAL; switch (ctx->replace_state) { case IEEE80211_CHANCTX_REPLACE_NONE: if (!ieee80211_chanctx_reserved_chanreq(local, ctx, compat, &tmp)) return -EBUSY; break; case IEEE80211_CHANCTX_WILL_BE_REPLACED: /* TODO: Perhaps the bandwidth change could be treated as a * reservation itself? 
*/ return -EBUSY; case IEEE80211_CHANCTX_REPLACES_OTHER: /* channel context that is going to replace another channel * context doesn't really exist and shouldn't be assigned * anywhere yet */ WARN_ON(1); break; } ieee80211_link_update_chanreq(link, chanreq); ieee80211_recalc_chanctx_chantype(local, ctx); *changed |= BSS_CHANGED_BANDWIDTH; return 0; } void ieee80211_link_release_channel(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (rcu_access_pointer(link->conf->chanctx_conf)) __ieee80211_link_release_channel(link, false); } void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; unsigned int link_id = link->link_id; struct ieee80211_bss_conf *link_conf = link->conf; struct ieee80211_bss_conf *ap_conf; struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *ap; struct ieee80211_chanctx_conf *conf; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss)) return; ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); ap_conf = wiphy_dereference(local->hw.wiphy, ap->vif.link_conf[link_id]); conf = wiphy_dereference(local->hw.wiphy, ap_conf->chanctx_conf); rcu_assign_pointer(link_conf->chanctx_conf, conf); } void ieee80211_iter_chan_contexts_atomic( struct ieee80211_hw *hw, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_chanctx *ctx; rcu_read_lock(); list_for_each_entry_rcu(ctx, &local->chanctx_list, list) if (ctx->driver_present) iter(hw, &ctx->conf, iter_data); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic); void ieee80211_iter_chan_contexts_mtx( struct ieee80211_hw *hw, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_chanctx *ctx; lockdep_assert_wiphy(hw->wiphy); list_for_each_entry(ctx, &local->chanctx_list, list) if (ctx->driver_present) iter(hw, &ctx->conf, iter_data); } EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_mtx);
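
/*
 * Illustrative sketch (not part of the file above): a driver could use the
 * ieee80211_iter_chan_contexts_atomic() iterator exported above to count
 * the channel contexts it currently has, even from atomic context. The
 * "my_drv" names below are hypothetical.
 */
static void my_drv_count_ctx(struct ieee80211_hw *hw,
			     struct ieee80211_chanctx_conf *conf,
			     void *data)
{
	unsigned int *n_ctx = data;

	/* invoked once per channel context present in the driver */
	(*n_ctx)++;
}

static unsigned int my_drv_num_chanctx(struct ieee80211_hw *hw)
{
	unsigned int n_ctx = 0;

	/* walks the chanctx list under RCU, so this is atomic-safe */
	ieee80211_iter_chan_contexts_atomic(hw, my_drv_count_ctx, &n_ctx);
	return n_ctx;
}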
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		RAW - implementation of IP "raw" sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *	Alan Cox	:	verify_area() fixed up
 *	Alan Cox	:	ICMP error handling
 *	Alan Cox	:	EMSGSIZE if you send too big a packet
 *	Alan Cox	:	Now uses generic datagrams and shared
 *				skbuff library. No more peek crashes,
 *				no more backlogs
 *	Alan Cox	:	Checks sk->broadcast.
 *	Alan Cox	:	Uses skb_free_datagram/skb_copy_datagram
 *	Alan Cox	:	Raw passes ip options too
 *	Alan Cox	:	Setsocketopt added
 *	Alan Cox	:	Fixed error return for broadcasts
 *	Alan Cox	:	Removed wake_up calls
 *	Alan Cox	:	Use ttl/tos
 *	Alan Cox	:	Cleaned up old debugging
 *	Alan Cox	:	Use new kernel side addresses
 *	Arnt Gulbrandsen	:	Fixed MSG_DONTROUTE in raw sockets.
 *	Alan Cox	:	BSD style RAW socket demultiplexing.
 *	Alan Cox	:	Beginnings of mrouted support.
 *	Alan Cox	:	Added IP_HDRINCL option.
 *	Alan Cox	:	Skip broadcast check if BSDism set.
 *	David S. Miller	:	New socket lookup architecture.
*/ #include <linux/types.h> #include <linux/atomic.h> #include <asm/byteorder.h> #include <asm/current.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/sockios.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/mroute.h> #include <linux/netdevice.h> #include <linux/in_route.h> #include <linux/route.h> #include <linux/skbuff.h> #include <linux/igmp.h> #include <net/net_namespace.h> #include <net/dst.h> #include <net/sock.h> #include <linux/ip.h> #include <linux/net.h> #include <net/ip.h> #include <net/icmp.h> #include <net/udp.h> #include <net/raw.h> #include <net/snmp.h> #include <net/tcp_states.h> #include <net/inet_common.h> #include <net/checksum.h> #include <net/xfrm.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/compat.h> #include <linux/uio.h> struct raw_frag_vec { struct msghdr *msg; union { struct icmphdr icmph; char c[1]; } hdr; int hlen; }; struct raw_hashinfo raw_v4_hashinfo; EXPORT_SYMBOL_GPL(raw_v4_hashinfo); int raw_hash_sk(struct sock *sk) { struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; struct hlist_head *hlist; hlist = &h->ht[raw_hashfunc(sock_net(sk), inet_sk(sk)->inet_num)]; spin_lock(&h->lock); sk_add_node_rcu(sk, hlist); sock_set_flag(sk, SOCK_RCU_FREE); spin_unlock(&h->lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } EXPORT_SYMBOL_GPL(raw_hash_sk); void raw_unhash_sk(struct sock *sk) { struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; spin_lock(&h->lock); if (sk_del_node_init_rcu(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_unlock(&h->lock); } EXPORT_SYMBOL_GPL(raw_unhash_sk); bool raw_v4_match(struct net *net, const struct sock *sk, unsigned short num, __be32 raddr, __be32 laddr, int dif, int sdif) { const struct inet_sock *inet = inet_sk(sk); if (net_eq(sock_net(sk), net) && inet->inet_num == num && !(inet->inet_daddr && inet->inet_daddr != raddr) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) return true; return false; } EXPORT_SYMBOL_GPL(raw_v4_match); /* * 0 - deliver * 1 - block */ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) { struct icmphdr _hdr; const struct icmphdr *hdr; hdr = skb_header_pointer(skb, skb_transport_offset(skb), sizeof(_hdr), &_hdr); if (!hdr) return 1; if (hdr->type < 32) { __u32 data = raw_sk(sk)->filter.data; return ((1U << hdr->type) & data) != 0; } /* Do not block unknown ICMP types */ return 0; } /* IP input processing comes here for RAW socket delivery. * Caller owns SKB, so we must make clones. * * RFC 1122: SHOULD pass TOS value up to the transport layer. * -> It does. And not only TOS, but all IP header. 
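 *
 * raw_v4_input() below clones the skb to every matching raw socket in
 * the protocol's hash bucket and returns whether at least one socket
 * took delivery.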
 */
static int raw_v4_input(struct net *net, struct sk_buff *skb,
			const struct iphdr *iph, int hash)
{
	int sdif = inet_sdif(skb);
	struct hlist_head *hlist;
	int dif = inet_iif(skb);
	int delivered = 0;
	struct sock *sk;

	hlist = &raw_v4_hashinfo.ht[hash];
	rcu_read_lock();
	sk_for_each_rcu(sk, hlist) {
		if (!raw_v4_match(net, sk, iph->protocol,
				  iph->saddr, iph->daddr, dif, sdif))
			continue;

		if (atomic_read(&sk->sk_rmem_alloc) >=
		    READ_ONCE(sk->sk_rcvbuf)) {
			sk_drops_inc(sk);
			continue;
		}

		delivered = 1;
		if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
		    ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
				   skb->dev->ifindex, sdif)) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				raw_rcv(sk, clone);
		}
	}
	rcu_read_unlock();
	return delivered;
}

int raw_local_deliver(struct sk_buff *skb, int protocol)
{
	struct net *net = dev_net(skb->dev);

	return raw_v4_input(net, skb, ip_hdr(skb),
			    raw_hashfunc(net, protocol));
}

static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int harderr = 0;
	bool recverr;
	int err = 0;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		ipv4_sk_update_pmtu(skb, sk, info);
	else if (type == ICMP_REDIRECT) {
		ipv4_sk_redirect(skb, sk);
		return;
	}

	/* Report error on raw socket, if:
	 *   1. User requested ip_recverr, or
	 *   2. Socket is connected (otherwise the error indication
	 *      is useless without ip_recverr) and error is hard.
	 */
	recverr = inet_test_bit(RECVERR, sk);
	if (!recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		return;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		err = EHOSTUNREACH;
		if (code > NR_ICMP_UNREACH)
			break;
		if (code == ICMP_FRAG_NEEDED) {
			harderr = READ_ONCE(inet->pmtudisc) != IP_PMTUDISC_DONT;
			err = EMSGSIZE;
		} else {
			err = icmp_err_convert[code].errno;
			harderr = icmp_err_convert[code].fatal;
		}
	}

	if (recverr) {
		const struct iphdr *iph = (const struct iphdr *)skb->data;
		u8 *payload = skb->data + (iph->ihl << 2);

		if (inet_test_bit(HDRINCL, sk))
			payload = skb->data;
		ip_icmp_error(sk, skb, err, 0, info, payload);
	}

	if (recverr || harderr) {
		sk->sk_err = err;
		sk_error_report(sk);
	}
}

void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
	struct net *net = dev_net(skb->dev);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_head *hlist;
	const struct iphdr *iph;
	struct sock *sk;
	int hash;

	hash = raw_hashfunc(net, protocol);
	hlist = &raw_v4_hashinfo.ht[hash];

	rcu_read_lock();
	sk_for_each_rcu(sk, hlist) {
		iph = (const struct iphdr *)skb->data;
		if (!raw_v4_match(net, sk, iph->protocol,
				  iph->daddr, iph->saddr, dif, sdif))
			continue;
		raw_err(sk, skb, info);
	}
	rcu_read_unlock();
}

static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason reason;

	/* Charge it to the socket.
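	 * (sock_queue_rcv_skb_reason() accounts the skb against the
	 * socket's receive buffer and reports a drop reason on failure.)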
*/ ipv4_pktinfo_prepare(sk, skb, true); if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) { sk_skb_reason_drop(sk, skb, reason); return NET_RX_DROP; } return NET_RX_SUCCESS; } int raw_rcv(struct sock *sk, struct sk_buff *skb) { if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { sk_drops_inc(sk); sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY); return NET_RX_DROP; } nf_reset_ct(skb); skb_push(skb, -skb_network_offset(skb)); raw_rcv_skb(sk, skb); return 0; } static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, struct msghdr *msg, size_t length, struct rtable **rtp, unsigned int flags, const struct sockcm_cookie *sockc) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct iphdr *iph; struct sk_buff *skb; unsigned int iphlen; int err; struct rtable *rt = *rtp; int hlen, tlen; if (length > rt->dst.dev->mtu) { ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, rt->dst.dev->mtu); return -EMSGSIZE; } if (length < sizeof(struct iphdr)) return -EINVAL; if (flags&MSG_PROBE) goto out; hlen = LL_RESERVED_SPACE(rt->dst.dev); tlen = rt->dst.dev->needed_tailroom; skb = sock_alloc_send_skb(sk, length + hlen + tlen + 15, flags & MSG_DONTWAIT, &err); if (!skb) goto error; skb_reserve(skb, hlen); skb->protocol = htons(ETH_P_IP); skb->priority = sockc->priority; skb->mark = sockc->mark; skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid); skb_dst_set(skb, &rt->dst); *rtp = NULL; skb_reset_network_header(skb); iph = ip_hdr(skb); skb_put(skb, length); skb->ip_summed = CHECKSUM_NONE; skb_setup_tx_timestamp(skb, sockc); if (flags & MSG_CONFIRM) skb_set_dst_pending_confirm(skb, 1); skb->transport_header = skb->network_header; err = -EFAULT; if (memcpy_from_msg(iph, msg, length)) goto error_free; iphlen = iph->ihl * 4; /* * We don't want to modify the ip header, but we do need to * be sure that it won't cause problems later along the network * stack. Specifically we want to make sure that iph->ihl is a * sane value. If ihl points beyond the length of the buffer passed * in, reject the frame as invalid */ err = -EINVAL; if (iphlen > length) goto error_free; if (iphlen >= sizeof(*iph)) { if (!iph->saddr) iph->saddr = fl4->saddr; iph->check = 0; iph->tot_len = htons(length); if (!iph->id) ip_select_ident(net, skb, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); skb->transport_header += iphlen; if (iph->protocol == IPPROTO_ICMP && length >= iphlen + sizeof(struct icmphdr)) icmp_out_count(net, ((struct icmphdr *) skb_transport_header(skb))->type); } err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, rt->dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) goto error; out: return 0; error_free: kfree_skb(skb); error: IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); if (err == -ENOBUFS && !inet_test_bit(RECVERR, sk)) err = 0; return err; } static int raw_probe_proto_opt(struct raw_frag_vec *rfv, struct flowi4 *fl4) { int err; if (fl4->flowi4_proto != IPPROTO_ICMP) return 0; /* We only need the first two bytes. 
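	 * (the ICMP type and code, used below to fill in the flow key)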
*/ rfv->hlen = 2; err = memcpy_from_msg(rfv->hdr.c, rfv->msg, rfv->hlen); if (err) return err; fl4->fl4_icmp_type = rfv->hdr.icmph.type; fl4->fl4_icmp_code = rfv->hdr.icmph.code; return 0; } static int raw_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct raw_frag_vec *rfv = from; if (offset < rfv->hlen) { int copy = min(rfv->hlen - offset, len); if (skb->ip_summed == CHECKSUM_PARTIAL) memcpy(to, rfv->hdr.c + offset, copy); else skb->csum = csum_block_add( skb->csum, csum_partial_copy_nocheck(rfv->hdr.c + offset, to, copy), odd); odd = 0; offset += copy; to += copy; len -= copy; if (!len) return 0; } offset -= rfv->hlen; return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb); } static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct ipcm_cookie ipc; struct rtable *rt = NULL; struct flowi4 fl4; u8 scope; int free = 0; __be32 daddr; __be32 saddr; int uc_index, err; struct ip_options_data opt_copy; struct raw_frag_vec rfv; int hdrincl; err = -EMSGSIZE; if (len > 0xFFFF) goto out; hdrincl = inet_test_bit(HDRINCL, sk); /* * Check the flags. */ err = -EOPNOTSUPP; if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */ goto out; /* compatibility */ /* * Get and verify the address. */ if (msg->msg_namelen) { DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); err = -EINVAL; if (msg->msg_namelen < sizeof(*usin)) goto out; if (usin->sin_family != AF_INET) { pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n", __func__, current->comm); err = -EAFNOSUPPORT; if (usin->sin_family) goto out; } daddr = usin->sin_addr.s_addr; /* ANK: I did not forget to get protocol from port field. * I just do not know, who uses this weirdness. * IP_HDRINCL is much more convenient. */ } else { err = -EDESTADDRREQ; if (sk->sk_state != TCP_ESTABLISHED) goto out; daddr = inet->inet_daddr; } ipcm_init_sk(&ipc, inet); /* Keep backward compat */ if (hdrincl) ipc.protocol = IPPROTO_RAW; if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); if (unlikely(err)) { kfree(ipc.opt); goto out; } if (ipc.opt) free = 1; } saddr = ipc.addr; ipc.addr = daddr; if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } if (ipc.opt) { err = -EINVAL; /* Linux does not mangle headers on raw sockets, * so that IP options + IP_HDRINCL is non-sense. */ if (hdrincl) goto done; if (ipc.opt->opt.srr) { if (!daddr) goto done; daddr = ipc.opt->opt.faddr; } } scope = ip_sendmsg_scope(inet, &ipc, msg); uc_index = READ_ONCE(inet->uc_index); if (ipv4_is_multicast(daddr)) { if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) ipc.oif = READ_ONCE(inet->mc_index); if (!saddr) saddr = READ_ONCE(inet->mc_addr); } else if (!ipc.oif) { ipc.oif = uc_index; } else if (ipv4_is_lbcast(daddr) && uc_index) { /* oif is set, packet is to local broadcast * and uc_index is set. oif is most likely set * by sk_bound_dev_if. If uc_index != oif check if the * oif is an L3 master and uc_index is an L3 slave. * If so, we want to allow the send using the uc_index. */ if (ipc.oif != uc_index && ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk), uc_index)) { ipc.oif = uc_index; } } flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, ipc.tos & INET_DSCP_MASK, scope, hdrincl ? 
ipc.protocol : sk->sk_protocol, inet_sk_flowi_flags(sk) | (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0, sk_uid(sk)); fl4.fl4_icmp_type = 0; fl4.fl4_icmp_code = 0; if (!hdrincl) { rfv.msg = msg; rfv.hlen = 0; err = raw_probe_proto_opt(&rfv, &fl4); if (err) goto done; } security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto done; } err = -EACCES; if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST)) goto done; if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; back_from_confirm: if (hdrincl) err = raw_send_hdrinc(sk, &fl4, msg, len, &rt, msg->msg_flags, &ipc.sockc); else { if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); err = ip_append_data(sk, &fl4, raw_getfrag, &rfv, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) { err = ip_push_pending_frames(sk, &fl4); if (err == -ENOBUFS && !inet_test_bit(RECVERR, sk)) err = 0; } release_sock(sk); } done: if (free) kfree(ipc.opt); ip_rt_put(rt); out: if (err < 0) return err; return len; do_confirm: if (msg->msg_flags & MSG_PROBE) dst_confirm_neigh(&rt->dst, &fl4.daddr); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto done; } static void raw_close(struct sock *sk, long timeout) { /* * Raw sockets may have direct kernel references. Kill them. */ ip_ra_control(sk, 0, NULL); sk_common_release(sk); } static void raw_destroy(struct sock *sk) { lock_sock(sk); ip_flush_pending_frames(sk); release_sock(sk); } /* This gets rid of all the nasties in af_inet. -DaveM */ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; struct net *net = sock_net(sk); u32 tb_id = RT_TABLE_LOCAL; int ret = -EINVAL; int chk_addr_ret; lock_sock(sk); if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; if (sk->sk_bound_dev_if) tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id; chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); ret = -EADDRNOTAVAIL; if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr, chk_addr_ret)) goto out; inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); ret = 0; out: release_sock(sk); return ret; } /* * This should be easy: if there is something there, * we return it; otherwise we block. */ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len, addr_len); goto out; } skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; sock_recv_cmsgs(msg, sk, skb); /* Copy the address.
*/ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet_cmsg_flags(inet)) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; } static int raw_sk_init(struct sock *sk) { struct raw_sock *rp = raw_sk(sk); sk->sk_drop_counters = &rp->drop_counters; if (inet_sk(sk)->inet_num == IPPROTO_ICMP) memset(&rp->filter, 0, sizeof(rp->filter)); return 0; } static int raw_seticmpfilter(struct sock *sk, sockptr_t optval, int optlen) { if (optlen > sizeof(struct icmp_filter)) optlen = sizeof(struct icmp_filter); if (copy_from_sockptr(&raw_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; } static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen) { int len, ret = -EFAULT; if (get_user(len, optlen)) goto out; ret = -EINVAL; if (len < 0) goto out; if (len > sizeof(struct icmp_filter)) len = sizeof(struct icmp_filter); ret = -EFAULT; if (put_user(len, optlen) || copy_to_user(optval, &raw_sk(sk)->filter, len)) goto out; ret = 0; out: return ret; } static int do_raw_setsockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen) { if (optname == ICMP_FILTER) { if (inet_sk(sk)->inet_num != IPPROTO_ICMP) return -EOPNOTSUPP; else return raw_seticmpfilter(sk, optval, optlen); } return -ENOPROTOOPT; } static int raw_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { if (level != SOL_RAW) return ip_setsockopt(sk, level, optname, optval, optlen); return do_raw_setsockopt(sk, optname, optval, optlen); } static int do_raw_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) { if (optname == ICMP_FILTER) { if (inet_sk(sk)->inet_num != IPPROTO_ICMP) return -EOPNOTSUPP; else return raw_geticmpfilter(sk, optval, optlen); } return -ENOPROTOOPT; } static int raw_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level != SOL_RAW) return ip_getsockopt(sk, level, optname, optval, optlen); return do_raw_getsockopt(sk, optname, optval, optlen); } static int raw_ioctl(struct sock *sk, int cmd, int *karg) { switch (cmd) { case SIOCOUTQ: { *karg = sk_wmem_alloc_get(sk); return 0; } case SIOCINQ: { struct sk_buff *skb; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) *karg = skb->len; else *karg = 0; spin_unlock_bh(&sk->sk_receive_queue.lock); return 0; } default: #ifdef CONFIG_IP_MROUTE return ipmr_ioctl(sk, cmd, karg); #else return -ENOIOCTLCMD; #endif } } #ifdef CONFIG_COMPAT static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: case SIOCINQ: return -ENOIOCTLCMD; default: #ifdef CONFIG_IP_MROUTE return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg)); #else return -ENOIOCTLCMD; #endif } } #endif int raw_abort(struct sock *sk, int err) { lock_sock(sk); sk->sk_err = err; sk_error_report(sk); __udp_disconnect(sk, 0); release_sock(sk); return 0; } EXPORT_SYMBOL_GPL(raw_abort); struct proto raw_prot = { .name = "RAW", .owner = THIS_MODULE, .close = raw_close, .destroy = raw_destroy, .connect = ip4_datagram_connect, .disconnect = __udp_disconnect, .ioctl = raw_ioctl, .init = raw_sk_init, .setsockopt = raw_setsockopt, .getsockopt = raw_getsockopt, .sendmsg = raw_sendmsg, .recvmsg = raw_recvmsg, .bind = raw_bind, .backlog_rcv = raw_rcv_skb, .release_cb = 
ip4_datagram_release_cb, .hash = raw_hash_sk, .unhash = raw_unhash_sk, .obj_size = sizeof(struct raw_sock), .useroffset = offsetof(struct raw_sock, filter), .usersize = sizeof_field(struct raw_sock, filter), .h.raw_hash = &raw_v4_hashinfo, #ifdef CONFIG_COMPAT .compat_ioctl = compat_raw_ioctl, #endif .diag_destroy = raw_abort, }; #ifdef CONFIG_PROC_FS static struct sock *raw_get_first(struct seq_file *seq, int bucket) { struct raw_hashinfo *h = pde_data(file_inode(seq->file)); struct raw_iter_state *state = raw_seq_private(seq); struct hlist_head *hlist; struct sock *sk; for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE; ++state->bucket) { hlist = &h->ht[state->bucket]; sk_for_each(sk, hlist) { if (sock_net(sk) == seq_file_net(seq)) return sk; } } return NULL; } static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk) { struct raw_iter_state *state = raw_seq_private(seq); do { sk = sk_next(sk); } while (sk && sock_net(sk) != seq_file_net(seq)); if (!sk) return raw_get_first(seq, state->bucket + 1); return sk; } static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos) { struct sock *sk = raw_get_first(seq, 0); if (sk) while (pos && (sk = raw_get_next(seq, sk)) != NULL) --pos; return pos ? NULL : sk; } void *raw_seq_start(struct seq_file *seq, loff_t *pos) __acquires(&h->lock) { struct raw_hashinfo *h = pde_data(file_inode(seq->file)); spin_lock(&h->lock); return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } EXPORT_SYMBOL_GPL(raw_seq_start); void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; if (v == SEQ_START_TOKEN) sk = raw_get_first(seq, 0); else sk = raw_get_next(seq, v); ++*pos; return sk; } EXPORT_SYMBOL_GPL(raw_seq_next); void raw_seq_stop(struct seq_file *seq, void *v) __releases(&h->lock) { struct raw_hashinfo *h = pde_data(file_inode(seq->file)); spin_unlock(&h->lock); } EXPORT_SYMBOL_GPL(raw_seq_stop); static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) { struct inet_sock *inet = inet_sk(sp); __be32 dest = inet->inet_daddr, src = inet->inet_rcv_saddr; __u16 destp = 0, srcp = inet->inet_num; seq_printf(seq, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", i, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(seq), sk_uid(sp)), 0, sock_i_ino(sp), refcount_read(&sp->sk_refcnt), sp, sk_drops_read(sp)); } static int raw_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops\n"); else raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); return 0; } static const struct seq_operations raw_seq_ops = { .start = raw_seq_start, .next = raw_seq_next, .stop = raw_seq_stop, .show = raw_seq_show, }; static __net_init int raw_init_net(struct net *net) { if (!proc_create_net_data("raw", 0444, net->proc_net, &raw_seq_ops, sizeof(struct raw_iter_state), &raw_v4_hashinfo)) return -ENOMEM; return 0; } static __net_exit void raw_exit_net(struct net *net) { remove_proc_entry("raw", net->proc_net); } static __net_initdata struct pernet_operations raw_net_ops = { .init = raw_init_net, .exit = raw_exit_net, }; int __init raw_proc_init(void) { return register_pernet_subsys(&raw_net_ops); } void __init raw_proc_exit(void) { unregister_pernet_subsys(&raw_net_ops); } #endif /* CONFIG_PROC_FS */ static void raw_sysctl_init_net(struct net *net) { #ifdef 
CONFIG_NET_L3_MASTER_DEV net->ipv4.sysctl_raw_l3mdev_accept = 1; #endif } static int __net_init raw_sysctl_init(struct net *net) { raw_sysctl_init_net(net); return 0; } static struct pernet_operations __net_initdata raw_sysctl_ops = { .init = raw_sysctl_init, }; void __init raw_init(void) { raw_sysctl_init_net(&init_net); if (register_pernet_subsys(&raw_sysctl_ops)) panic("RAW: failed to init sysctl parameters.\n"); }
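/*
 * Editor's illustrative sketch (not part of the original file): the
 * SOL_RAW/ICMP_FILTER option implemented above by do_raw_setsockopt() and
 * raw_seticmpfilter() is exercised from userspace roughly as follows.
 * The filter value is a bitmask of ICMP types to be dropped, so the mask
 * below keeps only echo replies. Error handling is omitted for brevity.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/icmp.h>
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *	struct icmp_filter filt = {
 *		.data = ~(1U << ICMP_ECHOREPLY),	// drop every other type
 *	};
 *	setsockopt(fd, SOL_RAW, ICMP_FILTER, &filt, sizeof(filt));
 */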
// SPDX-License-Identifier: GPL-2.0-or-later /* * CDC Ethernet based networking peripherals * Copyright (C) 2003-2005 by David Brownell * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync) */ // #define DEBUG // error path messages, extra info // #define VERBOSE // more; success messages #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> #if IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST) static int is_rndis(struct usb_interface_descriptor *desc) { return (desc->bInterfaceClass == USB_CLASS_COMM && desc->bInterfaceSubClass == 2 && desc->bInterfaceProtocol == 0xff); } static int is_activesync(struct usb_interface_descriptor *desc) { return (desc->bInterfaceClass == USB_CLASS_MISC && desc->bInterfaceSubClass == 1 && desc->bInterfaceProtocol == 1); } static int is_wireless_rndis(struct usb_interface_descriptor *desc) { return (desc->bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER && desc->bInterfaceSubClass == 1 && desc->bInterfaceProtocol == 3); } static int is_novatel_rndis(struct usb_interface_descriptor *desc) { return (desc->bInterfaceClass == USB_CLASS_MISC && desc->bInterfaceSubClass == 4 && desc->bInterfaceProtocol == 1); } #else #define is_rndis(desc) 0 #define is_activesync(desc) 0 #define is_wireless_rndis(desc) 0 #define is_novatel_rndis(desc) 0 #endif static const u8 mbm_guid[16] = { 0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01, 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, }; void usbnet_cdc_update_filter(struct usbnet *dev) { struct net_device *net = dev->net; u16 cdc_filter = USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST; /* filtering on the device is an optional feature and not worth * the hassle so we just roughly care about snooping and if any * multicast is requested, we take every multicast */ if (net->flags & IFF_PROMISC) cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS; if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) cdc_filter |= USB_CDC_PACKET_TYPE_ALL_MULTICAST; usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), USB_CDC_SET_ETHERNET_PACKET_FILTER, USB_TYPE_CLASS | USB_RECIP_INTERFACE, cdc_filter, dev->intf->cur_altsetting->desc.bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT ); } EXPORT_SYMBOL_GPL(usbnet_cdc_update_filter); /* We need to override usbnet_*_link_ksettings in bind() */ static const struct ethtool_ops cdc_ether_ethtool_ops = { .get_link = usbnet_get_link, .nway_reset = usbnet_nway_reset, .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_ts_info = ethtool_op_get_ts_info, .get_link_ksettings = usbnet_get_link_ksettings_internal, .set_link_ksettings = NULL, }; /* probes control interface, claims data interface, collects the bulk * endpoints, activates data interface (if needed), maybe sets MTU. * all pure cdc, except for certain firmware workarounds, and knowing * that rndis uses one different rule.
*/ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) { u8 *buf = intf->cur_altsetting->extra; int len = intf->cur_altsetting->extralen; struct usb_interface_descriptor *d; struct cdc_state *info = (void *) &dev->data; int status; int rndis; bool android_rndis_quirk = false; struct usb_driver *driver = driver_of(intf); struct usb_cdc_parsed_header header; if (sizeof(dev->data) < sizeof(*info)) return -EDOM; /* expect strict spec conformance for the descriptors, but * cope with firmware which stores them in the wrong place */ if (len == 0 && dev->udev->actconfig->extralen) { /* Motorola SB4100 (and others: Brad Hards says it's * from a Broadcom design) put CDC descriptors here */ buf = dev->udev->actconfig->extra; len = dev->udev->actconfig->extralen; dev_dbg(&intf->dev, "CDC descriptors on config\n"); } /* Maybe CDC descriptors are after the endpoint? This bug has * been seen on some 2Wire Inc RNDIS-ish products. */ if (len == 0) { struct usb_host_endpoint *hep; hep = intf->cur_altsetting->endpoint; if (hep) { buf = hep->extra; len = hep->extralen; } if (len) dev_dbg(&intf->dev, "CDC descriptors on endpoint\n"); } /* this assumes that if there's a non-RNDIS vendor variant * of cdc-acm, it'll fail RNDIS requests cleanly. */ rndis = (is_rndis(&intf->cur_altsetting->desc) || is_activesync(&intf->cur_altsetting->desc) || is_wireless_rndis(&intf->cur_altsetting->desc) || is_novatel_rndis(&intf->cur_altsetting->desc)); memset(info, 0, sizeof(*info)); info->control = intf; cdc_parse_cdc_header(&header, intf, buf, len); info->u = header.usb_cdc_union_desc; info->header = header.usb_cdc_header_desc; info->ether = header.usb_cdc_ether_desc; if (!info->u) { if (rndis) goto skip; else /* in that case a quirk is mandatory */ goto bad_desc; } /* we need a master/control interface (what we're * probed with) and a slave/data interface; union * descriptors sort this all out. */ info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0); info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0); if (!info->control || !info->data) { dev_dbg(&intf->dev, "master #%u/%p slave #%u/%p\n", info->u->bMasterInterface0, info->control, info->u->bSlaveInterface0, info->data); /* fall back to hard-wiring for RNDIS */ if (rndis) { android_rndis_quirk = true; goto skip; } goto bad_desc; } if (info->control != intf) { dev_dbg(&intf->dev, "bogus CDC Union\n"); /* Ambit USB Cable Modem (and maybe others) * interchanges master and slave interface. */ if (info->data == intf) { info->data = info->control; info->control = intf; } else goto bad_desc; } /* some devices merge these - skip class check */ if (info->control == info->data) goto skip; /* a data interface altsetting does the real i/o */ d = &info->data->cur_altsetting->desc; if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass); goto bad_desc; } skip: /* Communication class functions with bmCapabilities are not * RNDIS. But some Wireless class RNDIS functions use * bmCapabilities for their own purpose. The failsafe is * therefore applied only to Communication class RNDIS * functions. The rndis test is redundant, but a cheap * optimization. 
*/ if (rndis && is_rndis(&intf->cur_altsetting->desc) && header.usb_cdc_acm_descriptor && header.usb_cdc_acm_descriptor->bmCapabilities) { dev_dbg(&intf->dev, "ACM capabilities %02x, not really RNDIS?\n", header.usb_cdc_acm_descriptor->bmCapabilities); goto bad_desc; } if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize); /* because of Zaurus, we may be ignoring the host * side link address we were given. */ } if (header.usb_cdc_mdlm_desc && memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { dev_dbg(&intf->dev, "GUID doesn't match\n"); goto bad_desc; } if (header.usb_cdc_mdlm_detail_desc && header.usb_cdc_mdlm_detail_desc->bLength < (sizeof(struct usb_cdc_mdlm_detail_desc) + 1)) { dev_dbg(&intf->dev, "Descriptor too short\n"); goto bad_desc; } /* Microsoft ActiveSync based and some regular RNDIS devices lack the * CDC descriptors, so we'll hard-wire the interfaces and not check * for descriptors. * * Some Android RNDIS devices have a CDC Union descriptor pointing * to non-existing interfaces. Ignore that and attempt the same * hard-wired 0 and 1 interfaces. */ if (rndis && (!info->u || android_rndis_quirk)) { info->control = usb_ifnum_to_if(dev->udev, 0); info->data = usb_ifnum_to_if(dev->udev, 1); if (!info->control || !info->data || info->control != intf) { dev_dbg(&intf->dev, "rndis: master #0/%p slave #1/%p\n", info->control, info->data); goto bad_desc; } } else if (!info->header || (!rndis && !info->ether)) { dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n", info->header ? "" : "header ", info->u ? "" : "union ", info->ether ? "" : "ether "); goto bad_desc; } /* claim data interface and set it up ... with side effects. * network traffic can't flow until an altsetting is enabled. */ if (info->data != info->control) { status = usb_driver_claim_interface(driver, info->data, dev); if (status < 0) return status; } status = usbnet_get_endpoints(dev, info->data); if (status < 0) { /* ensure immediate exit from usbnet_disconnect */ usb_set_intfdata(info->data, NULL); if (info->data != info->control) usb_driver_release_interface(driver, info->data); return status; } /* status endpoint: optional for CDC Ethernet, not RNDIS (or ACM) */ if (info->data != info->control) dev->status = NULL; if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { struct usb_endpoint_descriptor *desc; dev->status = &info->control->cur_altsetting->endpoint[0]; desc = &dev->status->desc; if (!usb_endpoint_is_int_in(desc) || (le16_to_cpu(desc->wMaxPacketSize) < sizeof(struct usb_cdc_notification)) || !desc->bInterval) { dev_dbg(&intf->dev, "bad notification endpoint\n"); dev->status = NULL; } } if (rndis && !dev->status) { dev_dbg(&intf->dev, "missing RNDIS status endpoint\n"); usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver, info->data); return -ENODEV; } /* override ethtool_ops */ dev->net->ethtool_ops = &cdc_ether_ethtool_ops; return 0; bad_desc: dev_info(&dev->udev->dev, "bad CDC descriptors\n"); return -ENODEV; } EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); /* like usbnet_generic_cdc_bind() but handles filter initialization * correctly */ int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf) { int rv; rv = usbnet_generic_cdc_bind(dev, intf); if (rv < 0) goto bail_out; /* Some devices don't initialise properly. In particular * the packet filter is not reset. There are devices that * don't do reset all the way. So the packet filter should * be set to a sane initial value. 
*/ usbnet_cdc_update_filter(dev); bail_out: return rv; } EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind); void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) { struct cdc_state *info = (void *) &dev->data; struct usb_driver *driver = driver_of(intf); /* combined interface - nothing to do */ if (info->data == info->control) return; /* disconnect master --> disconnect slave */ if (intf == info->control && info->data) { /* ensure immediate exit from usbnet_disconnect */ usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver, info->data); info->data = NULL; } /* and vice versa (just in case) */ else if (intf == info->data && info->control) { /* ensure immediate exit from usbnet_disconnect */ usb_set_intfdata(info->control, NULL); usb_driver_release_interface(driver, info->control); info->control = NULL; } } EXPORT_SYMBOL_GPL(usbnet_cdc_unbind); /* Communications Device Class, Ethernet Control model * * Takes two interfaces. The DATA interface is inactive till an altsetting * is selected. Configuration data includes class descriptors. There's * an optional status endpoint on the control interface. * * This should interop with whatever the 2.4 "CDCEther.c" driver * (by Brad Hards) talked with, with more functionality. */ static void speed_change(struct usbnet *dev, __le32 *speeds) { dev->tx_speed = __le32_to_cpu(speeds[0]); dev->rx_speed = __le32_to_cpu(speeds[1]); } void usbnet_cdc_status(struct usbnet *dev, struct urb *urb) { struct usb_cdc_notification *event; if (urb->actual_length < sizeof(*event)) return; /* SPEED_CHANGE can get split into two 8-byte packets */ if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { speed_change(dev, (__le32 *) urb->transfer_buffer); return; } event = urb->transfer_buffer; switch (event->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", event->wValue ? "on" : "off"); if (netif_carrier_ok(dev->net) != !!event->wValue) usbnet_link_change(dev, !!event->wValue, 0); break; case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n", urb->actual_length); if (urb->actual_length != (sizeof(*event) + 8)) set_bit(EVENT_STS_SPLIT, &dev->flags); else speed_change(dev, (__le32 *) &event[1]); break; /* USB_CDC_NOTIFY_RESPONSE_AVAILABLE can happen too (e.g. RNDIS), * but there are no standard formats for the response data. 
*/ default: netdev_err(dev->net, "CDC: unexpected notification %02x!\n", event->bNotificationType); break; } } EXPORT_SYMBOL_GPL(usbnet_cdc_status); int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf) { int status; struct cdc_state *info = (void *) &dev->data; BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct cdc_state))); status = usbnet_ether_cdc_bind(dev, intf); if (status < 0) return status; status = usbnet_get_ethernet_addr(dev, info->ether->iMACAddress); if (status < 0) { usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver_of(intf), info->data); return status; } return 0; } EXPORT_SYMBOL_GPL(usbnet_cdc_bind); static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf) { int status = usbnet_cdc_bind(dev, intf); if (!status && (dev->net->dev_addr[0] & 0x02)) eth_hw_addr_random(dev->net); return status; } /* Make sure packets have correct destination MAC address * * A firmware bug observed on some devices (ZTE MF823/831/910) is that the * device sends packets with a static, bogus, random MAC address (even if the * device MAC address has been updated). Always set MAC address to that of the * device. */ int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02)) return 1; skb_reset_mac_header(skb); ether_addr_copy(eth_hdr(skb)->h_dest, dev->net->dev_addr); return 1; } EXPORT_SYMBOL_GPL(usbnet_cdc_zte_rx_fixup); /* Ensure correct link state * * Some devices (ZTE MF823/831/910) send two carrier-on notifications when * connected. This causes the link state to be incorrect. Work around this by * always setting the state to off, then on. */ static void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) { struct usb_cdc_notification *event; if (urb->actual_length < sizeof(*event)) return; event = urb->transfer_buffer; if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) { usbnet_cdc_status(dev, urb); return; } netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", event->wValue ?
"on" : "off"); if (event->wValue && netif_carrier_ok(dev->net)) netif_carrier_off(dev->net); usbnet_link_change(dev, !!event->wValue, 0); } static const struct driver_info cdc_info = { .description = "CDC Ethernet Device", .flags = FLAG_ETHER | FLAG_POINTTOPOINT, .bind = usbnet_cdc_bind, .unbind = usbnet_cdc_unbind, .status = usbnet_cdc_status, .set_rx_mode = usbnet_cdc_update_filter, .manage_power = usbnet_manage_power, }; static const struct driver_info zte_cdc_info = { .description = "ZTE CDC Ethernet Device", .flags = FLAG_ETHER | FLAG_POINTTOPOINT, .bind = usbnet_cdc_zte_bind, .unbind = usbnet_cdc_unbind, .status = usbnet_cdc_zte_status, .set_rx_mode = usbnet_cdc_update_filter, .manage_power = usbnet_manage_power, .rx_fixup = usbnet_cdc_zte_rx_fixup, }; static const struct driver_info wwan_info = { .description = "Mobile Broadband Network Device", .flags = FLAG_WWAN, .bind = usbnet_cdc_bind, .unbind = usbnet_cdc_unbind, .status = usbnet_cdc_status, .set_rx_mode = usbnet_cdc_update_filter, .manage_power = usbnet_manage_power, }; /*-------------------------------------------------------------------------*/ #define HUAWEI_VENDOR_ID 0x12D1 #define NOVATEL_VENDOR_ID 0x1410 #define ZTE_VENDOR_ID 0x19D2 #define DELL_VENDOR_ID 0x413C #define REALTEK_VENDOR_ID 0x0bda #define SAMSUNG_VENDOR_ID 0x04e8 #define LENOVO_VENDOR_ID 0x17ef #define LINKSYS_VENDOR_ID 0x13b1 #define NVIDIA_VENDOR_ID 0x0955 #define HP_VENDOR_ID 0x03f0 #define MICROSOFT_VENDOR_ID 0x045e #define UBLOX_VENDOR_ID 0x1546 #define TPLINK_VENDOR_ID 0x2357 #define AQUANTIA_VENDOR_ID 0x2eca #define ASIX_VENDOR_ID 0x0b95 static const struct usb_device_id products[] = { /* BLACKLIST !! * * First blacklist any products that are egregiously nonconformant * with the CDC Ethernet specs. Minor braindamage we cope with; when * they're not even trying, needing a separate driver is only the first * of the differences to show up. */ #define ZAURUS_MASTER_INTERFACE \ .bInterfaceClass = USB_CLASS_COMM, \ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE #define ZAURUS_FAKE_INTERFACE \ .bInterfaceClass = USB_CLASS_COMM, \ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE /* SA-1100 based Sharp Zaurus ("collie"), or compatible; * wire-incompatible with true CDC Ethernet implementations. * (And, it seems, needlessly so...) */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8004, ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, /* PXA-25x based Sharp Zaurii. Note that it seems some of these * (later models especially) may have shipped only with firmware * advertising false "CDC MDLM" compatibility ... but we're not * clear which models did that, so for now let's assume the worst. 
*/ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8005, /* A-300 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8005, /* A-300 */ ZAURUS_FAKE_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_FAKE_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8007, /* C-700 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8007, /* C-700 */ ZAURUS_FAKE_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9031, /* C-750 C-760 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9032, /* SL-6000 */ ZAURUS_FAKE_INTERFACE, .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, /* reported with some C860 units */ .idProduct = 0x9050, /* C-860 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, /* Olympus has some models with a Zaurus-compatible option. 
* R-1000 uses a FreeScale i.MXL cpu (ARMv4T) */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x07B4, .idProduct = 0x0F02, /* R-1000 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, }, /* LG Electronics VL600 wants additional headers on every frame */ { USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */ { USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Novatel USB551L and MC551 - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0xB001, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Novatel E362 - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9010, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8195, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8196, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Novatel Expedite E371 - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9011, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* HP lt2523 (Novatel E371) - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* AnyDATA ADU960S - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Huawei E1820 - handled by qmi_wwan */ { USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1), .driver_info = 0, }, /* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */ { USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa359, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */ { USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter(based on AQC111U) */ { USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2790, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter(based on AQC112U) */ { USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2791, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 
USB_CDC_PROTO_NONE), .driver_info = 0, }, /* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */ { USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. * We match the main interface, ignoring the optional device * class so we could handle devices that aren't exclusively * CDC ether. * * NOTE: this match must come AFTER entries blacklisting devices * because of bugs/quirks in a given product (like Zaurus, above). */ { /* ZTE (Vodafone) K3805-Z */ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1003, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* ZTE (Vodafone) K3806-Z */ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1015, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* ZTE (Vodafone) K4510-Z */ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1173, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* ZTE (Vodafone) K3770-Z */ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1177, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* ZTE (Vodafone) K3772-Z */ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1181, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* Telit modules */ USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (kernel_ulong_t) &wwan_info, }, { /* Dell DW5580 modules */ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (kernel_ulong_t)&wwan_info, }, { /* Huawei ME906 and ME909 */ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* ZTE modules */ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&zte_cdc_info, }, { /* U-blox TOBY-L2 */ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* U-blox SARA-U2 */ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* U-blox LARA-R6 01B */ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1313, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* U-blox LARA-L6 */ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1343, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* Cinterion PLS8 modem by GEMALTO */ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* Cinterion AHS3 modem by GEMALTO */ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM, 
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* Cinterion PLS62-W modem by GEMALTO/THALES */ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { /* Various Huawei modems with a network port like the UMG1831 */ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 255), .driver_info = (unsigned long)&wwan_info, }, { }, /* END */ }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver cdc_driver = { .name = "cdc_ether", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .reset_resume = usbnet_resume, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(cdc_driver); MODULE_AUTHOR("David Brownell"); MODULE_DESCRIPTION("USB CDC Ethernet devices"); MODULE_LICENSE("GPL");
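/*
 * Editor's illustrative sketch (hypothetical vendor/product IDs, not an
 * entry in the table above): a CDC Ether function that needs the WWAN
 * treatment would be whitelisted with an entry like the following, placed
 * before the generic USB_INTERFACE_INFO() matches so that the more
 * specific device match wins:
 *
 *	{ USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678,
 *			USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 *			USB_CDC_PROTO_NONE),
 *	  .driver_info = (unsigned long)&wwan_info,
 *	},
 */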
// SPDX-License-Identifier: GPL-2.0-only /* * Support KVM guest page tracking * * This feature allows us to track page access in the guest. Currently, only * write access is tracked. * * Copyright(C) 2015 Intel Corporation. * * Author: * Xiao Guangrong <guangrong.xiao@linux.intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/lockdep.h> #include <linux/kvm_host.h> #include <linux/rculist.h> #include "mmu.h" #include "mmu_internal.h" #include "page_track.h" static bool kvm_external_write_tracking_enabled(struct kvm *kvm) { #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING /* * Read external_write_tracking_enabled before related pointers. Pairs * with the smp_store_release in kvm_page_track_write_tracking_enable(). */ return smp_load_acquire(&kvm->arch.external_write_tracking_enabled); #else return false; #endif } bool kvm_page_track_write_tracking_enabled(struct kvm *kvm) { return kvm_external_write_tracking_enabled(kvm) || kvm_shadow_root_allocated(kvm) || !tdp_enabled; } void kvm_page_track_free_memslot(struct kvm_memory_slot *slot) { vfree(slot->arch.gfn_write_track); slot->arch.gfn_write_track = NULL; } static int __kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot, unsigned long npages) { const size_t size = sizeof(*slot->arch.gfn_write_track); if (!slot->arch.gfn_write_track) slot->arch.gfn_write_track = __vcalloc(npages, size, GFP_KERNEL_ACCOUNT); return slot->arch.gfn_write_track ?
0 : -ENOMEM; } int kvm_page_track_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { if (!kvm_page_track_write_tracking_enabled(kvm)) return 0; return __kvm_page_track_write_tracking_alloc(slot, npages); } int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot) { return __kvm_page_track_write_tracking_alloc(slot, slot->npages); } static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, short count) { int index, val; index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); val = slot->arch.gfn_write_track[index]; if (WARN_ON_ONCE(val + count < 0 || val + count > USHRT_MAX)) return; slot->arch.gfn_write_track[index] += count; } void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) { lockdep_assert_held_write(&kvm->mmu_lock); lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || srcu_read_lock_held(&kvm->srcu)); if (KVM_BUG_ON(!kvm_page_track_write_tracking_enabled(kvm), kvm)) return; update_gfn_write_track(slot, gfn, 1); /* * new track stops large page mapping for the * tracked page. */ kvm_mmu_gfn_disallow_lpage(slot, gfn); if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) kvm_flush_remote_tlbs(kvm); } void __kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) { lockdep_assert_held_write(&kvm->mmu_lock); lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) || srcu_read_lock_held(&kvm->srcu)); if (KVM_BUG_ON(!kvm_page_track_write_tracking_enabled(kvm), kvm)) return; update_gfn_write_track(slot, gfn, -1); /* * allow large page mapping for the tracked page * after the tracker is gone. */ kvm_mmu_gfn_allow_lpage(slot, gfn); } /* * check if the corresponding access on the specified guest page is tracked. */ bool kvm_gfn_is_write_tracked(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn) { int index; if (!slot) return false; if (!kvm_page_track_write_tracking_enabled(kvm)) return false; index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); return !!READ_ONCE(slot->arch.gfn_write_track[index]); } #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING void kvm_page_track_cleanup(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; head = &kvm->arch.track_notifier_head; cleanup_srcu_struct(&head->track_srcu); } int kvm_page_track_init(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; head = &kvm->arch.track_notifier_head; INIT_HLIST_HEAD(&head->track_notifier_list); return init_srcu_struct(&head->track_srcu); } static int kvm_enable_external_write_tracking(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; int r = 0, i, bkt; if (kvm->arch.vm_type == KVM_X86_TDX_VM) return -EOPNOTSUPP; mutex_lock(&kvm->slots_arch_lock); /* * Check for *any* write tracking user (not just external users) under * lock. This avoids unnecessary work, e.g. if KVM itself is using * write tracking, or if two external users raced when registering. */ if (kvm_page_track_write_tracking_enabled(kvm)) goto out_success; for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { slots = __kvm_memslots(kvm, i); kvm_for_each_memslot(slot, bkt, slots) { /* * Intentionally do NOT free allocations on failure to * avoid having to track which allocations were made * now versus when the memslot was created. The * metadata is guaranteed to be freed when the slot is * freed, and will be kept/used if userspace retries * the failed ioctl() instead of killing the VM. 
*/ r = kvm_page_track_write_tracking_alloc(slot); if (r) goto out_unlock; } } out_success: /* * Ensure that external_write_tracking_enabled becomes true strictly * after all the related pointers are set. */ smp_store_release(&kvm->arch.external_write_tracking_enabled, true); out_unlock: mutex_unlock(&kvm->slots_arch_lock); return r; } /* * register the notifier so that event interception for the tracked guest * pages can be received. */ int kvm_page_track_register_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n) { struct kvm_page_track_notifier_head *head; int r; if (!kvm || kvm->mm != current->mm) return -ESRCH; if (!kvm_external_write_tracking_enabled(kvm)) { r = kvm_enable_external_write_tracking(kvm); if (r) return r; } kvm_get_kvm(kvm); head = &kvm->arch.track_notifier_head; write_lock(&kvm->mmu_lock); hlist_add_head_rcu(&n->node, &head->track_notifier_list); write_unlock(&kvm->mmu_lock); return 0; } EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); /* * stop receiving the event interception. It is the opposite operation of * kvm_page_track_register_notifier(). */ void kvm_page_track_unregister_notifier(struct kvm *kvm, struct kvm_page_track_notifier_node *n) { struct kvm_page_track_notifier_head *head; head = &kvm->arch.track_notifier_head; write_lock(&kvm->mmu_lock); hlist_del_rcu(&n->node); write_unlock(&kvm->mmu_lock); synchronize_srcu(&head->track_srcu); kvm_put_kvm(kvm); } EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); /* * Notify the node that write access is intercepted and write emulation is * finished at this time. * * It is up to the node itself to figure out whether the written page is * one it is interested in. */ void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes) { struct kvm_page_track_notifier_head *head; struct kvm_page_track_notifier_node *n; int idx; head = &kvm->arch.track_notifier_head; if (hlist_empty(&head->track_notifier_list)) return; idx = srcu_read_lock(&head->track_srcu); hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, srcu_read_lock_held(&head->track_srcu)) if (n->track_write) n->track_write(gpa, new, bytes, n); srcu_read_unlock(&head->track_srcu, idx); } /* * Notify external page track nodes that a memory region is being removed from * the VM, e.g. so that users can free any associated metadata. */ void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot) { struct kvm_page_track_notifier_head *head; struct kvm_page_track_notifier_node *n; int idx; head = &kvm->arch.track_notifier_head; if (hlist_empty(&head->track_notifier_list)) return; idx = srcu_read_lock(&head->track_srcu); hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, srcu_read_lock_held(&head->track_srcu)) if (n->track_remove_region) n->track_remove_region(slot->base_gfn, slot->npages, n); srcu_read_unlock(&head->track_srcu, idx); } /* * add a guest page to the tracking pool so that corresponding accesses to * that page will be intercepted. * * @kvm: the guest instance we are interested in. * @gfn: the guest page.
*/ int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; int idx; idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { srcu_read_unlock(&kvm->srcu, idx); return -EINVAL; } write_lock(&kvm->mmu_lock); __kvm_write_track_add_gfn(kvm, slot, gfn); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return 0; } EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn); /* * remove the guest page from the tracking pool, which stops the interception * of corresponding accesses to that page. * * @kvm: the guest instance we are interested in. * @gfn: the guest page. */ int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; int idx; idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { srcu_read_unlock(&kvm->srcu, idx); return -EINVAL; } write_lock(&kvm->mmu_lock); __kvm_write_track_remove_gfn(kvm, slot, gfn); write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return 0; } EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn); #endif
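/*
 * Editor's illustrative sketch (hypothetical module code, not part of this
 * file): an external user consumes this API by registering a notifier node
 * and then marking the guest frames it cares about. The callback signature
 * matches how __kvm_page_track_write() invokes ->track_write() above.
 *
 *	static void demo_track_write(gpa_t gpa, const u8 *new, int bytes,
 *				     struct kvm_page_track_notifier_node *node)
 *	{
 *		// react to an intercepted, already-emulated guest write
 *	}
 *
 *	static struct kvm_page_track_notifier_node demo_node = {
 *		.track_write = demo_track_write,
 *	};
 *
 *	// once per VM:
 *	kvm_page_track_register_notifier(kvm, &demo_node);
 *	// then, for each guest frame to be write-protected:
 *	kvm_write_track_add_gfn(kvm, gfn);
 */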
2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 
3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1994 Linus Torvalds * * Cyrix stuff, June 1998 by: * - Rafael R. Reilova (moved everything from head.S), * <rreilova@ececs.uc.edu> * - Channing Corn (tests & fixes), * - Andrew D. Balsa (code cleanup). */ #include <linux/init.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/nospec.h> #include <linux/prctl.h> #include <linux/sched/smt.h> #include <linux/pgtable.h> #include <linux/bpf.h> #include <asm/spec-ctrl.h> #include <asm/cmdline.h> #include <asm/bugs.h> #include <asm/processor.h> #include <asm/processor-flags.h> #include <asm/fpu/api.h> #include <asm/msr.h> #include <asm/vmx.h> #include <asm/paravirt.h> #include <asm/cpu_device_id.h> #include <asm/e820/api.h> #include <asm/hypervisor.h> #include <asm/tlbflush.h> #include <asm/cpu.h> #include "cpu.h" /* * Speculation Vulnerability Handling * * Each vulnerability is handled with the following functions: * <vuln>_select_mitigation() -- Selects a mitigation to use. This should * take into account all relevant command line * options. * <vuln>_update_mitigation() -- This is called after all vulnerabilities have * selected a mitigation, in case the selection * may want to change based on other choices * made. This function is optional. * <vuln>_apply_mitigation() -- Enable the selected mitigation. * * The compile-time mitigation in all cases should be AUTO. An explicit * command-line option can override AUTO. If no such option is * provided, <vuln>_select_mitigation() will override AUTO to the best * mitigation option. */ static void __init spectre_v1_select_mitigation(void); static void __init spectre_v1_apply_mitigation(void); static void __init spectre_v2_select_mitigation(void); static void __init spectre_v2_update_mitigation(void); static void __init spectre_v2_apply_mitigation(void); static void __init retbleed_select_mitigation(void); static void __init retbleed_update_mitigation(void); static void __init retbleed_apply_mitigation(void); static void __init spectre_v2_user_select_mitigation(void); static void __init spectre_v2_user_update_mitigation(void); static void __init spectre_v2_user_apply_mitigation(void); static void __init ssb_select_mitigation(void); static void __init ssb_apply_mitigation(void); static void __init l1tf_select_mitigation(void); static void __init l1tf_apply_mitigation(void); static void __init mds_select_mitigation(void); static void __init mds_update_mitigation(void); static void __init mds_apply_mitigation(void); static void __init taa_select_mitigation(void); static void __init taa_update_mitigation(void); static void __init taa_apply_mitigation(void); static void __init mmio_select_mitigation(void); static void __init mmio_update_mitigation(void); static void __init mmio_apply_mitigation(void); static void __init rfds_select_mitigation(void); static void __init rfds_update_mitigation(void); static void __init rfds_apply_mitigation(void); static void __init srbds_select_mitigation(void); static void __init srbds_apply_mitigation(void); static void __init l1d_flush_select_mitigation(void); static void __init srso_select_mitigation(void); static void __init srso_update_mitigation(void); static void __init srso_apply_mitigation(void); static void __init gds_select_mitigation(void); static void __init gds_apply_mitigation(void); static void __init 
bhi_select_mitigation(void); static void __init bhi_update_mitigation(void); static void __init bhi_apply_mitigation(void); static void __init its_select_mitigation(void); static void __init its_update_mitigation(void); static void __init its_apply_mitigation(void); static void __init tsa_select_mitigation(void); static void __init tsa_apply_mitigation(void); static void __init vmscape_select_mitigation(void); static void __init vmscape_update_mitigation(void); static void __init vmscape_apply_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); /* The current value of the SPEC_CTRL MSR with task-specific bits set */ DEFINE_PER_CPU(u64, x86_spec_ctrl_current); EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current); /* * Set when the CPU has run a potentially malicious guest. An IBPB will * be needed before running userspace. That IBPB will flush the branch * predictor content. */ DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user); EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user); u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; static u64 __ro_after_init x86_arch_cap_msr; static DEFINE_MUTEX(spec_ctrl_mutex); void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk; static void __init set_return_thunk(void *thunk) { x86_return_thunk = thunk; pr_info("active return thunk: %ps\n", thunk); } /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { this_cpu_write(x86_spec_ctrl_current, val); wrmsrq(MSR_IA32_SPEC_CTRL, val); } /* * Keep track of the SPEC_CTRL MSR value for the current task, which may differ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). */ void update_spec_ctrl_cond(u64 val) { if (this_cpu_read(x86_spec_ctrl_current) == val) return; this_cpu_write(x86_spec_ctrl_current, val); /* * With KERNEL_IBRS, this MSR is written on return-to-user; unless * forced, the update can be delayed until that time. */ if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) wrmsrq(MSR_IA32_SPEC_CTRL, val); } noinstr u64 spec_ctrl_current(void) { return this_cpu_read(x86_spec_ctrl_current); } EXPORT_SYMBOL_GPL(spec_ctrl_current); /* * AMD specific MSR info for Speculative Store Bypass control. * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). */ u64 __ro_after_init x86_amd_ls_cfg_base; u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; /* Control conditional STIBP in switch_to() */ DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); /* Control conditional IBPB in switch_mm() */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); /* Control unconditional IBPB in switch_mm() */ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); /* Control IBPB on vCPU load */ DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb); EXPORT_SYMBOL_GPL(switch_vcpu_ibpb); /* Control CPU buffer clear before idling (halt, mwait) */ DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear); EXPORT_SYMBOL_GPL(cpu_buf_idle_clear); /* * Controls whether L1D-flush-based mitigations are enabled, based on * HW features and the admin setting via boot parameter; defaults to * false. */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); /* * Controls CPU Fill buffer clear before VMenter. This is a subset of * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only * mitigation is required. 
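 *
 * Illustrative sketch of the consumer side (which lives in KVM, not in
 * this file; the helper name is assumed): a VM-entry path would gate
 * the VERW-based clear on this key, roughly:
 *
 *	if (static_branch_unlikely(&cpu_buf_vm_clear))
 *		x86_clear_cpu_buffers();	// VERW before VMenter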
*/ DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear); EXPORT_SYMBOL_GPL(cpu_buf_vm_clear); #undef pr_fmt #define pr_fmt(fmt) "mitigations: " fmt static void __init cpu_print_attack_vectors(void) { pr_info("Enabled attack vectors: "); if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL)) pr_cont("user_kernel, "); if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) pr_cont("user_user, "); if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) pr_cont("guest_host, "); if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) pr_cont("guest_guest, "); pr_cont("SMT mitigations: "); switch (smt_mitigations) { case SMT_MITIGATIONS_OFF: pr_cont("off\n"); break; case SMT_MITIGATIONS_AUTO: pr_cont("auto\n"); break; case SMT_MITIGATIONS_ON: pr_cont("on\n"); } } void __init cpu_select_mitigations(void) { /* * Read the SPEC_CTRL MSR to account for reserved bits which may * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD * init code as it is not enumerated and depends on the family. */ if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); /* * A previously running kernel (kexec) may have some controls * turned ON. Clear them and let the mitigations setup below * rediscover them based on configuration. */ x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; } x86_arch_cap_msr = x86_read_arch_cap_msr(); cpu_print_attack_vectors(); /* Select the proper CPU mitigations before patching alternatives: */ spectre_v1_select_mitigation(); spectre_v2_select_mitigation(); retbleed_select_mitigation(); spectre_v2_user_select_mitigation(); ssb_select_mitigation(); l1tf_select_mitigation(); mds_select_mitigation(); taa_select_mitigation(); mmio_select_mitigation(); rfds_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); srso_select_mitigation(); gds_select_mitigation(); its_select_mitigation(); bhi_select_mitigation(); tsa_select_mitigation(); vmscape_select_mitigation(); /* * After mitigations are selected, some may need to update their * choices. */ spectre_v2_update_mitigation(); /* * retbleed_update_mitigation() relies on the state set by * spectre_v2_update_mitigation(); specifically it wants to know about * spectre_v2=ibrs. */ retbleed_update_mitigation(); /* * its_update_mitigation() depends on spectre_v2_update_mitigation() * and retbleed_update_mitigation(). */ its_update_mitigation(); /* * spectre_v2_user_update_mitigation() depends on * retbleed_update_mitigation(), specifically the STIBP * selection is forced for UNRET or IBPB. */ spectre_v2_user_update_mitigation(); mds_update_mitigation(); taa_update_mitigation(); mmio_update_mitigation(); rfds_update_mitigation(); bhi_update_mitigation(); /* srso_update_mitigation() depends on retbleed_update_mitigation(). */ srso_update_mitigation(); vmscape_update_mitigation(); spectre_v1_apply_mitigation(); spectre_v2_apply_mitigation(); retbleed_apply_mitigation(); spectre_v2_user_apply_mitigation(); ssb_apply_mitigation(); l1tf_apply_mitigation(); mds_apply_mitigation(); taa_apply_mitigation(); mmio_apply_mitigation(); rfds_apply_mitigation(); srbds_apply_mitigation(); srso_apply_mitigation(); gds_apply_mitigation(); its_apply_mitigation(); bhi_apply_mitigation(); tsa_apply_mitigation(); vmscape_apply_mitigation(); } /* * NOTE: This function is *only* called for SVM, since Intel uses * MSR_IA32_SPEC_CTRL for SSBD. 
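 *
 * Sketch of the expected calling pattern (hypothetical SVM-side code;
 * the vcpu field name is assumed):
 *
 *	x86_virt_spec_ctrl(svm->virt_spec_ctrl, true);	// before guest entry
 *	// ... run the guest ...
 *	x86_virt_spec_ctrl(svm->virt_spec_ctrl, false);	// after guest exit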
*/ void x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest) { u64 guestval, hostval; struct thread_info *ti = current_thread_info(); /* * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported. */ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && !static_cpu_has(X86_FEATURE_VIRT_SSBD)) return; /* * If the host has SSBD mitigation enabled, force it in the host's * virtual MSR value. If it's not permanently enabled, evaluate * current's TIF_SSBD thread flag. */ if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) hostval = SPEC_CTRL_SSBD; else hostval = ssbd_tif_to_spec_ctrl(ti->flags); /* Sanitize the guest value */ guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; if (hostval != guestval) { unsigned long tif; tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : ssbd_spec_ctrl_to_tif(hostval); speculation_ctrl_update(tif); } } EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); static void x86_amd_ssb_disable(void) { u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) wrmsrq(MSR_AMD64_LS_CFG, msrval); } #undef pr_fmt #define pr_fmt(fmt) "MDS: " fmt /* * Returns true if vulnerability should be mitigated based on the * selected attack vector controls. * * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst */ static bool __init should_mitigate_vuln(unsigned int bug) { switch (bug) { /* * The only runtime-selected spectre_v1 mitigations in the kernel are * related to SWAPGS protection on kernel entry. Therefore, protection * is only required for the user->kernel attack vector. */ case X86_BUG_SPECTRE_V1: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL); case X86_BUG_SPECTRE_V2: case X86_BUG_RETBLEED: case X86_BUG_L1TF: case X86_BUG_ITS: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST); case X86_BUG_SPECTRE_V2_USER: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST); /* * All the vulnerabilities below allow potentially leaking data * across address spaces. Therefore, mitigation is required for * any of these 4 attack vectors. */ case X86_BUG_MDS: case X86_BUG_TAA: case X86_BUG_MMIO_STALE_DATA: case X86_BUG_RFDS: case X86_BUG_SRBDS: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST); case X86_BUG_GDS: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) || (smt_mitigations != SMT_MITIGATIONS_OFF); case X86_BUG_SPEC_STORE_BYPASS: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER); case X86_BUG_VMSCAPE: return cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST); default: WARN(1, "Unknown bug %x\n", bug); return false; } } /* Default mitigation for MDS-affected CPUs */ static enum mds_mitigations mds_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_MDS) ? 
MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF; static bool mds_nosmt __ro_after_init = false; static const char * const mds_strings[] = { [MDS_MITIGATION_OFF] = "Vulnerable", [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", }; enum taa_mitigations { TAA_MITIGATION_OFF, TAA_MITIGATION_AUTO, TAA_MITIGATION_UCODE_NEEDED, TAA_MITIGATION_VERW, TAA_MITIGATION_TSX_DISABLED, }; /* Default mitigation for TAA-affected CPUs */ static enum taa_mitigations taa_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF; enum mmio_mitigations { MMIO_MITIGATION_OFF, MMIO_MITIGATION_AUTO, MMIO_MITIGATION_UCODE_NEEDED, MMIO_MITIGATION_VERW, }; /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ static enum mmio_mitigations mmio_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF; enum rfds_mitigations { RFDS_MITIGATION_OFF, RFDS_MITIGATION_AUTO, RFDS_MITIGATION_VERW, RFDS_MITIGATION_UCODE_NEEDED, }; /* Default mitigation for Register File Data Sampling */ static enum rfds_mitigations rfds_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF; /* * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry. */ static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init; static void __init mds_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_MDS)) { mds_mitigation = MDS_MITIGATION_OFF; return; } if (mds_mitigation == MDS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_MDS)) mds_mitigation = MDS_MITIGATION_FULL; else mds_mitigation = MDS_MITIGATION_OFF; } if (mds_mitigation == MDS_MITIGATION_OFF) return; verw_clear_cpu_buf_mitigation_selected = true; } static void __init mds_update_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_MDS)) return; /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. 
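 * For example, booting with "mds=off tsx_async_abort=full" on a CPU
 * affected by both still ends up with MDS mitigated: TAA selects the
 * shared VERW mitigation, which sets
 * verw_clear_cpu_buf_mitigation_selected, and the code below then
 * promotes mds_mitigation to MDS_MITIGATION_FULL.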
*/ if (verw_clear_cpu_buf_mitigation_selected) mds_mitigation = MDS_MITIGATION_FULL; if (mds_mitigation == MDS_MITIGATION_FULL) { if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) mds_mitigation = MDS_MITIGATION_VMWERV; } pr_info("%s\n", mds_strings[mds_mitigation]); } static void __init mds_apply_mitigation(void) { if (mds_mitigation == MDS_MITIGATION_FULL || mds_mitigation == MDS_MITIGATION_VMWERV) { setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)) cpu_smt_disable(false); } } static int __init mds_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_MDS)) return 0; if (!str) return -EINVAL; if (!strcmp(str, "off")) mds_mitigation = MDS_MITIGATION_OFF; else if (!strcmp(str, "full")) mds_mitigation = MDS_MITIGATION_FULL; else if (!strcmp(str, "full,nosmt")) { mds_mitigation = MDS_MITIGATION_FULL; mds_nosmt = true; } return 0; } early_param("mds", mds_cmdline); #undef pr_fmt #define pr_fmt(fmt) "TAA: " fmt static bool taa_nosmt __ro_after_init; static const char * const taa_strings[] = { [TAA_MITIGATION_OFF] = "Vulnerable", [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", }; static bool __init taa_vulnerable(void) { return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM); } static void __init taa_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_TAA)) { taa_mitigation = TAA_MITIGATION_OFF; return; } /* TSX previously disabled by tsx=off */ if (!boot_cpu_has(X86_FEATURE_RTM)) { taa_mitigation = TAA_MITIGATION_TSX_DISABLED; return; } /* Microcode will be checked in taa_update_mitigation(). */ if (taa_mitigation == TAA_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_TAA)) taa_mitigation = TAA_MITIGATION_VERW; else taa_mitigation = TAA_MITIGATION_OFF; } if (taa_mitigation != TAA_MITIGATION_OFF) verw_clear_cpu_buf_mitigation_selected = true; } static void __init taa_update_mitigation(void) { if (!taa_vulnerable()) return; if (verw_clear_cpu_buf_mitigation_selected) taa_mitigation = TAA_MITIGATION_VERW; if (taa_mitigation == TAA_MITIGATION_VERW) { /* Check if the requisite ucode is available. */ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; /* * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. * A microcode update fixes this behavior to clear CPU buffers. It also * adds support for MSR_IA32_TSX_CTRL which is enumerated by the * ARCH_CAP_TSX_CTRL_MSR bit. * * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode * update is required. */ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)) taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; } pr_info("%s\n", taa_strings[taa_mitigation]); } static void __init taa_apply_mitigation(void) { if (taa_mitigation == TAA_MITIGATION_VERW || taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) { /* * TSX is enabled, select alternate mitigation for TAA which is * the same as MDS. Enable MDS static branch to clear CPU buffers. * * For guests that can't determine whether the correct microcode is * present on host, enable the mitigation for UCODE_NEEDED as well. 
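 *
 * Example: a guest that enumerates RTM=1 but cannot verify the host's
 * microcode level lands in TAA_MITIGATION_UCODE_NEEDED, and VERW is
 * still issued via X86_FEATURE_CLEAR_CPU_BUF below on the chance that
 * the host microcode does honor it.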
*/ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) cpu_smt_disable(false); } } static int __init tsx_async_abort_parse_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_TAA)) return 0; if (!str) return -EINVAL; if (!strcmp(str, "off")) { taa_mitigation = TAA_MITIGATION_OFF; } else if (!strcmp(str, "full")) { taa_mitigation = TAA_MITIGATION_VERW; } else if (!strcmp(str, "full,nosmt")) { taa_mitigation = TAA_MITIGATION_VERW; taa_nosmt = true; } return 0; } early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "MMIO Stale Data: " fmt static bool mmio_nosmt __ro_after_init = false; static const char * const mmio_strings[] = { [MMIO_MITIGATION_OFF] = "Vulnerable", [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", }; static void __init mmio_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { mmio_mitigation = MMIO_MITIGATION_OFF; return; } /* Microcode will be checked in mmio_update_mitigation(). */ if (mmio_mitigation == MMIO_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA)) mmio_mitigation = MMIO_MITIGATION_VERW; else mmio_mitigation = MMIO_MITIGATION_OFF; } if (mmio_mitigation == MMIO_MITIGATION_OFF) return; /* * Enable CPU buffer clear mitigation for host and VMM, if also affected * by MDS or TAA. */ if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable()) verw_clear_cpu_buf_mitigation_selected = true; } static void __init mmio_update_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) return; if (verw_clear_cpu_buf_mitigation_selected) mmio_mitigation = MMIO_MITIGATION_VERW; if (mmio_mitigation == MMIO_MITIGATION_VERW) { /* * Check if the system has the right microcode. * * CPU Fill buffer clear mitigation is enumerated by either an explicit * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS * affected systems. */ if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) || (boot_cpu_has(X86_FEATURE_MD_CLEAR) && boot_cpu_has(X86_FEATURE_FLUSH_L1D) && !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))) mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; } pr_info("%s\n", mmio_strings[mmio_mitigation]); } static void __init mmio_apply_mitigation(void) { if (mmio_mitigation == MMIO_MITIGATION_OFF) return; /* * Only enable the VMM mitigation if the CPU buffer clear mitigation is * not being used. */ if (verw_clear_cpu_buf_mitigation_selected) { setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); static_branch_disable(&cpu_buf_vm_clear); } else { static_branch_enable(&cpu_buf_vm_clear); } /* * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can * be propagated to uncore buffers, clearing the Fill buffers on idle * is required irrespective of SMT state. 
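 *
 * Conceptual consumer of this key (the real check sits in the idle
 * paths, not here; the helper name is assumed):
 *
 *	if (static_branch_unlikely(&cpu_buf_idle_clear))
 *		x86_clear_cpu_buffers();	// VERW before HLT/MWAIT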
*/ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) static_branch_enable(&cpu_buf_idle_clear); if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) cpu_smt_disable(false); } static int __init mmio_stale_data_parse_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) return 0; if (!str) return -EINVAL; if (!strcmp(str, "off")) { mmio_mitigation = MMIO_MITIGATION_OFF; } else if (!strcmp(str, "full")) { mmio_mitigation = MMIO_MITIGATION_VERW; } else if (!strcmp(str, "full,nosmt")) { mmio_mitigation = MMIO_MITIGATION_VERW; mmio_nosmt = true; } return 0; } early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "Register File Data Sampling: " fmt static const char * const rfds_strings[] = { [RFDS_MITIGATION_OFF] = "Vulnerable", [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File", [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", }; static inline bool __init verw_clears_cpu_reg_file(void) { return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR); } static void __init rfds_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_RFDS)) { rfds_mitigation = RFDS_MITIGATION_OFF; return; } if (rfds_mitigation == RFDS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_RFDS)) rfds_mitigation = RFDS_MITIGATION_VERW; else rfds_mitigation = RFDS_MITIGATION_OFF; } if (rfds_mitigation == RFDS_MITIGATION_OFF) return; if (verw_clears_cpu_reg_file()) verw_clear_cpu_buf_mitigation_selected = true; } static void __init rfds_update_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_RFDS)) return; if (verw_clear_cpu_buf_mitigation_selected) rfds_mitigation = RFDS_MITIGATION_VERW; if (rfds_mitigation == RFDS_MITIGATION_VERW) { if (!verw_clears_cpu_reg_file()) rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED; } pr_info("%s\n", rfds_strings[rfds_mitigation]); } static void __init rfds_apply_mitigation(void) { if (rfds_mitigation == RFDS_MITIGATION_VERW) setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); } static __init int rfds_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!boot_cpu_has_bug(X86_BUG_RFDS)) return 0; if (!strcmp(str, "off")) rfds_mitigation = RFDS_MITIGATION_OFF; else if (!strcmp(str, "on")) rfds_mitigation = RFDS_MITIGATION_VERW; return 0; } early_param("reg_file_data_sampling", rfds_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "SRBDS: " fmt enum srbds_mitigations { SRBDS_MITIGATION_OFF, SRBDS_MITIGATION_AUTO, SRBDS_MITIGATION_UCODE_NEEDED, SRBDS_MITIGATION_FULL, SRBDS_MITIGATION_TSX_OFF, SRBDS_MITIGATION_HYPERVISOR, }; static enum srbds_mitigations srbds_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF; static const char * const srbds_strings[] = { [SRBDS_MITIGATION_OFF] = "Vulnerable", [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", }; static bool srbds_off; void update_srbds_msr(void) { u64 mcu_ctrl; if (!boot_cpu_has_bug(X86_BUG_SRBDS)) return; if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return; if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) return; /* * An MDS_NO CPU for which the SRBDS mitigation is not needed due to * TSX being disabled may not have received the SRBDS MSR microcode. 
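 * Bail out below if the control is absent. Example of the effect when
 * it is present: "srbds=off" sets RNGDS_MITG_DIS in
 * MSR_IA32_MCU_OPT_CTRL, telling microcode to skip the RDRAND/RDSEED
 * sampling protection; SRBDS_MITIGATION_FULL clears that bit so the
 * protection stays active.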
*/ if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) return; rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); switch (srbds_mitigation) { case SRBDS_MITIGATION_OFF: case SRBDS_MITIGATION_TSX_OFF: mcu_ctrl |= RNGDS_MITG_DIS; break; case SRBDS_MITIGATION_FULL: mcu_ctrl &= ~RNGDS_MITG_DIS; break; default: break; } wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); } static void __init srbds_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_SRBDS)) { srbds_mitigation = SRBDS_MITIGATION_OFF; return; } if (srbds_mitigation == SRBDS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_SRBDS)) srbds_mitigation = SRBDS_MITIGATION_FULL; else { srbds_mitigation = SRBDS_MITIGATION_OFF; return; } } /* * Check to see if this is one of the MDS_NO systems supporting TSX that * are only exposed to SRBDS when TSX is enabled or when CPU is affected * by Processor MMIO Stale Data vulnerability. */ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; else if (srbds_off) srbds_mitigation = SRBDS_MITIGATION_OFF; pr_info("%s\n", srbds_strings[srbds_mitigation]); } static void __init srbds_apply_mitigation(void) { update_srbds_msr(); } static int __init srbds_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!boot_cpu_has_bug(X86_BUG_SRBDS)) return 0; srbds_off = !strcmp(str, "off"); return 0; } early_param("srbds", srbds_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "L1D Flush : " fmt enum l1d_flush_mitigations { L1D_FLUSH_OFF = 0, L1D_FLUSH_ON, }; static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF; static void __init l1d_flush_select_mitigation(void) { if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) return; static_branch_enable(&switch_mm_cond_l1d_flush); pr_info("Conditional flush on switch_mm() enabled\n"); } static int __init l1d_flush_parse_cmdline(char *str) { if (!strcmp(str, "on")) l1d_flush_mitigation = L1D_FLUSH_ON; return 0; } early_param("l1d_flush", l1d_flush_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "GDS: " fmt enum gds_mitigations { GDS_MITIGATION_OFF, GDS_MITIGATION_AUTO, GDS_MITIGATION_UCODE_NEEDED, GDS_MITIGATION_FORCE, GDS_MITIGATION_FULL, GDS_MITIGATION_FULL_LOCKED, GDS_MITIGATION_HYPERVISOR, }; static enum gds_mitigations gds_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF; static const char * const gds_strings[] = { [GDS_MITIGATION_OFF] = "Vulnerable", [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", [GDS_MITIGATION_FULL] = "Mitigation: Microcode", [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", }; bool gds_ucode_mitigated(void) { return (gds_mitigation == GDS_MITIGATION_FULL || gds_mitigation == GDS_MITIGATION_FULL_LOCKED); } EXPORT_SYMBOL_GPL(gds_ucode_mitigated); void update_gds_msr(void) { u64 mcu_ctrl_after; u64 mcu_ctrl; switch (gds_mitigation) { case GDS_MITIGATION_OFF: rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); mcu_ctrl |= GDS_MITG_DIS; break; case GDS_MITIGATION_FULL_LOCKED: /* * The LOCKED state comes from the boot CPU. APs might not have * the same state. Make sure the mitigation is enabled on all * CPUs. 
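 *
 * Example: if the boot CPU was not locked and "gather_data_sampling=off"
 * set GDS_MITG_DIS, a locked AP will silently ignore the write below;
 * the read-back WARN_ON_ONCE() further down catches exactly that
 * mismatch.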
*/ case GDS_MITIGATION_FULL: rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); mcu_ctrl &= ~GDS_MITG_DIS; break; case GDS_MITIGATION_FORCE: case GDS_MITIGATION_UCODE_NEEDED: case GDS_MITIGATION_HYPERVISOR: case GDS_MITIGATION_AUTO: return; } wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); /* * Check to make sure that the WRMSR value was not ignored. Writes to * GDS_MITG_DIS will be ignored if this processor is locked but the boot * processor was not. */ rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); } static void __init gds_select_mitigation(void) { u64 mcu_ctrl; if (!boot_cpu_has_bug(X86_BUG_GDS)) return; if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { gds_mitigation = GDS_MITIGATION_HYPERVISOR; return; } /* Will verify below that mitigation _can_ be disabled */ if (gds_mitigation == GDS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_GDS)) gds_mitigation = GDS_MITIGATION_FULL; else gds_mitigation = GDS_MITIGATION_OFF; } /* No microcode */ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) { if (gds_mitigation != GDS_MITIGATION_FORCE) gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; return; } /* Microcode has mitigation, use it */ if (gds_mitigation == GDS_MITIGATION_FORCE) gds_mitigation = GDS_MITIGATION_FULL; rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); if (mcu_ctrl & GDS_MITG_LOCKED) { if (gds_mitigation == GDS_MITIGATION_OFF) pr_warn("Mitigation locked. Disable failed.\n"); /* * The mitigation is selected from the boot CPU. All other CPUs * _should_ have the same state. If the boot CPU isn't locked * but others are then update_gds_msr() will WARN() of the state * mismatch. If the boot CPU is locked update_gds_msr() will * ensure the other CPUs have the mitigation enabled. */ gds_mitigation = GDS_MITIGATION_FULL_LOCKED; } } static void __init gds_apply_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_GDS)) return; /* Microcode is present */ if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL) update_gds_msr(); else if (gds_mitigation == GDS_MITIGATION_FORCE) { /* * This only needs to be done on the boot CPU so do it * here rather than in update_gds_msr() */ setup_clear_cpu_cap(X86_FEATURE_AVX); pr_warn("Microcode update needed! Disabling AVX as mitigation.\n"); } pr_info("%s\n", gds_strings[gds_mitigation]); } static int __init gds_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!boot_cpu_has_bug(X86_BUG_GDS)) return 0; if (!strcmp(str, "off")) gds_mitigation = GDS_MITIGATION_OFF; else if (!strcmp(str, "force")) gds_mitigation = GDS_MITIGATION_FORCE; return 0; } early_param("gather_data_sampling", gds_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt enum spectre_v1_mitigation { SPECTRE_V1_MITIGATION_NONE, SPECTRE_V1_MITIGATION_AUTO, }; static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ? SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE; static const char * const spectre_v1_strings[] = { [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization", }; /* * Does SMAP provide full mitigation against speculative kernel access to * userspace? */ static bool smap_works_speculatively(void) { if (!boot_cpu_has(X86_FEATURE_SMAP)) return false; /* * On CPUs which are vulnerable to Meltdown, SMAP does not * prevent speculative access to user data in the L1 cache. * Consider SMAP to be non-functional as a mitigation on these * CPUs. 
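 *
 * For context, the swapgs mitigation applied later conceptually amounts
 * to inserting a speculation barrier on the user entry path:
 *
 *	swapgs
 *	lfence		// X86_FEATURE_FENCE_SWAPGS_USER
 *
 * so a mispredicted conditional swapgs cannot speculatively run with a
 * user-controlled GS base.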
*/ if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) return false; return true; } static void __init spectre_v1_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1)) spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; } static void __init spectre_v1_apply_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) return; if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { /* * With Spectre v1, a user can speculatively control either * path of a conditional swapgs with a user-controlled GS * value. The mitigation is to add lfences to both code paths. * * If FSGSBASE is enabled, the user can put a kernel address in * GS, in which case SMAP provides no protection. * * If FSGSBASE is disabled, the user can only put a user space * address in GS. That makes an attack harder, but still * possible if there's no SMAP protection. */ if (boot_cpu_has(X86_FEATURE_FSGSBASE) || !smap_works_speculatively()) { /* * Mitigation can be provided from SWAPGS itself or * PTI as the CR3 write in the Meltdown mitigation * is serializing. * * If neither is there, mitigate with an LFENCE to * stop speculation through swapgs. */ if (boot_cpu_has_bug(X86_BUG_SWAPGS) && !boot_cpu_has(X86_FEATURE_PTI)) setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER); /* * Enable lfences in the kernel entry (non-swapgs) * paths, to prevent user entry from speculatively * skipping swapgs. */ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL); } } pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); } static int __init nospectre_v1_cmdline(char *str) { spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; return 0; } early_param("nospectre_v1", nospectre_v1_cmdline); enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; /* Depends on spectre_v2 mitigation selected already */ static inline bool cdt_possible(enum spectre_v2_mitigation mode) { if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) || !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) return false; if (mode == SPECTRE_V2_RETPOLINE || mode == SPECTRE_V2_EIBRS_RETPOLINE) return true; return false; } #undef pr_fmt #define pr_fmt(fmt) "RETBleed: " fmt enum its_mitigation { ITS_MITIGATION_OFF, ITS_MITIGATION_AUTO, ITS_MITIGATION_VMEXIT_ONLY, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_RETPOLINE_STUFF, }; static enum its_mitigation its_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF; enum retbleed_mitigation { RETBLEED_MITIGATION_NONE, RETBLEED_MITIGATION_AUTO, RETBLEED_MITIGATION_UNRET, RETBLEED_MITIGATION_IBPB, RETBLEED_MITIGATION_IBRS, RETBLEED_MITIGATION_EIBRS, RETBLEED_MITIGATION_STUFF, }; static const char * const retbleed_strings[] = { [RETBLEED_MITIGATION_NONE] = "Vulnerable", [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing", }; static enum retbleed_mitigation retbleed_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? 
RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE; static int __ro_after_init retbleed_nosmt = false; enum srso_mitigation { SRSO_MITIGATION_NONE, SRSO_MITIGATION_AUTO, SRSO_MITIGATION_UCODE_NEEDED, SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED, SRSO_MITIGATION_MICROCODE, SRSO_MITIGATION_NOSMT, SRSO_MITIGATION_SAFE_RET, SRSO_MITIGATION_IBPB, SRSO_MITIGATION_IBPB_ON_VMEXIT, SRSO_MITIGATION_BP_SPEC_REDUCE, }; static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO; static int __init retbleed_parse_cmdline(char *str) { if (!str) return -EINVAL; while (str) { char *next = strchr(str, ','); if (next) { *next = 0; next++; } if (!strcmp(str, "off")) { retbleed_mitigation = RETBLEED_MITIGATION_NONE; } else if (!strcmp(str, "auto")) { retbleed_mitigation = RETBLEED_MITIGATION_AUTO; } else if (!strcmp(str, "unret")) { retbleed_mitigation = RETBLEED_MITIGATION_UNRET; } else if (!strcmp(str, "ibpb")) { retbleed_mitigation = RETBLEED_MITIGATION_IBPB; } else if (!strcmp(str, "stuff")) { retbleed_mitigation = RETBLEED_MITIGATION_STUFF; } else if (!strcmp(str, "nosmt")) { retbleed_nosmt = true; } else if (!strcmp(str, "force")) { setup_force_cpu_bug(X86_BUG_RETBLEED); } else { pr_err("Ignoring unknown retbleed option (%s).", str); } str = next; } return 0; } early_param("retbleed", retbleed_parse_cmdline); #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" static void __init retbleed_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) { retbleed_mitigation = RETBLEED_MITIGATION_NONE; return; } switch (retbleed_mitigation) { case RETBLEED_MITIGATION_UNRET: if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) { retbleed_mitigation = RETBLEED_MITIGATION_AUTO; pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n"); } break; case RETBLEED_MITIGATION_IBPB: if (!boot_cpu_has(X86_FEATURE_IBPB)) { pr_err("WARNING: CPU does not support IBPB.\n"); retbleed_mitigation = RETBLEED_MITIGATION_AUTO; } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); retbleed_mitigation = RETBLEED_MITIGATION_AUTO; } break; case RETBLEED_MITIGATION_STUFF: if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n"); retbleed_mitigation = RETBLEED_MITIGATION_AUTO; } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n"); retbleed_mitigation = RETBLEED_MITIGATION_AUTO; } break; default: break; } if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO) return; if (!should_mitigate_vuln(X86_BUG_RETBLEED)) { retbleed_mitigation = RETBLEED_MITIGATION_NONE; return; } /* Intel mitigation selected in retbleed_update_mitigation() */ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) retbleed_mitigation = RETBLEED_MITIGATION_UNRET; else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB)) retbleed_mitigation = RETBLEED_MITIGATION_IBPB; else retbleed_mitigation = RETBLEED_MITIGATION_NONE; } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { /* Final mitigation depends on spectre-v2 selection */ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; 
else if (boot_cpu_has(X86_FEATURE_IBRS)) retbleed_mitigation = RETBLEED_MITIGATION_IBRS; else retbleed_mitigation = RETBLEED_MITIGATION_NONE; } } static void __init retbleed_update_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) return; /* ITS can also enable stuffing */ if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) retbleed_mitigation = RETBLEED_MITIGATION_STUFF; /* If SRSO is using IBPB, that works for retbleed too */ if (srso_mitigation == SRSO_MITIGATION_IBPB) retbleed_mitigation = RETBLEED_MITIGATION_IBPB; if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF && !cdt_possible(spectre_v2_enabled)) { pr_err("WARNING: retbleed=stuff depends on retpoline\n"); retbleed_mitigation = RETBLEED_MITIGATION_NONE; } /* * Let IBRS trump all on Intel without affecting the effects of the * retbleed= cmdline option except for call depth based stuffing */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { switch (spectre_v2_enabled) { case SPECTRE_V2_IBRS: retbleed_mitigation = RETBLEED_MITIGATION_IBRS; break; case SPECTRE_V2_EIBRS: case SPECTRE_V2_EIBRS_RETPOLINE: case SPECTRE_V2_EIBRS_LFENCE: retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; break; default: if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) { if (retbleed_mitigation != RETBLEED_MITIGATION_NONE) pr_err(RETBLEED_INTEL_MSG); retbleed_mitigation = RETBLEED_MITIGATION_NONE; } } } pr_info("%s\n", retbleed_strings[retbleed_mitigation]); } static void __init retbleed_apply_mitigation(void) { bool mitigate_smt = false; switch (retbleed_mitigation) { case RETBLEED_MITIGATION_NONE: return; case RETBLEED_MITIGATION_UNRET: setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); set_return_thunk(retbleed_return_thunk); if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); mitigate_smt = true; break; case RETBLEED_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); mitigate_smt = true; /* * IBPB on entry already obviates the need for * software-based untraining so clear those in case some * other mitigation like SRSO has selected them. */ setup_clear_cpu_cap(X86_FEATURE_UNRET); setup_clear_cpu_cap(X86_FEATURE_RETHUNK); /* * There is no need for RSB filling: write_ibpb() ensures * all predictions, including the RSB, are invalidated, * regardless of IBPB implementation. 
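 *
 * (Conceptually an IBPB is a single MSR write; in rough asm:
 *
 *	movl	$MSR_IA32_PRED_CMD, %ecx
 *	movl	$PRED_CMD_IBPB, %eax
 *	xorl	%edx, %edx
 *	wrmsr
 *
 * write_ibpb() wraps this, so no separate RSB stuffing is needed.)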
*/ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); break; case RETBLEED_MITIGATION_STUFF: setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); set_return_thunk(call_depth_return_thunk); break; default: break; } if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)) cpu_smt_disable(false); } #undef pr_fmt #define pr_fmt(fmt) "ITS: " fmt static const char * const its_strings[] = { [ITS_MITIGATION_OFF] = "Vulnerable", [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected", [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks", [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB", }; static int __init its_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) { pr_err("Mitigation disabled at compile time, ignoring option (%s)", str); return 0; } if (!strcmp(str, "off")) { its_mitigation = ITS_MITIGATION_OFF; } else if (!strcmp(str, "on")) { its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; } else if (!strcmp(str, "force")) { its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; setup_force_cpu_bug(X86_BUG_ITS); } else if (!strcmp(str, "vmexit")) { its_mitigation = ITS_MITIGATION_VMEXIT_ONLY; } else if (!strcmp(str, "stuff")) { its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; } else { pr_err("Ignoring unknown indirect_target_selection option (%s).", str); } return 0; } early_param("indirect_target_selection", its_parse_cmdline); static void __init its_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_ITS)) { its_mitigation = ITS_MITIGATION_OFF; return; } if (its_mitigation == ITS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_ITS)) its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; else its_mitigation = ITS_MITIGATION_OFF; } if (its_mitigation == ITS_MITIGATION_OFF) return; if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) { pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n"); its_mitigation = ITS_MITIGATION_OFF; return; } if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) { pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n"); its_mitigation = ITS_MITIGATION_OFF; return; } if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { pr_err("RSB stuff mitigation not supported, using default\n"); its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; } if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY && !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; } static void __init its_update_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_ITS)) return; switch (spectre_v2_enabled) { case SPECTRE_V2_NONE: if (its_mitigation != ITS_MITIGATION_OFF) pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n"); its_mitigation = ITS_MITIGATION_OFF; break; case SPECTRE_V2_RETPOLINE: case SPECTRE_V2_EIBRS_RETPOLINE: /* Retpoline+CDT mitigates ITS */ if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF) its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; break; case SPECTRE_V2_LFENCE: case SPECTRE_V2_EIBRS_LFENCE: pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n"); its_mitigation = ITS_MITIGATION_OFF; break; default: break; } if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && !cdt_possible(spectre_v2_enabled)) its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; pr_info("%s\n", 
its_strings[its_mitigation]); } static void __init its_apply_mitigation(void) { switch (its_mitigation) { case ITS_MITIGATION_OFF: case ITS_MITIGATION_AUTO: case ITS_MITIGATION_VMEXIT_ONLY: break; case ITS_MITIGATION_ALIGNED_THUNKS: if (!boot_cpu_has(X86_FEATURE_RETPOLINE)) setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS); setup_force_cpu_cap(X86_FEATURE_RETHUNK); set_return_thunk(its_return_thunk); break; case ITS_MITIGATION_RETPOLINE_STUFF: setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); set_return_thunk(call_depth_return_thunk); break; } } #undef pr_fmt #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt enum tsa_mitigations { TSA_MITIGATION_NONE, TSA_MITIGATION_AUTO, TSA_MITIGATION_UCODE_NEEDED, TSA_MITIGATION_USER_KERNEL, TSA_MITIGATION_VM, TSA_MITIGATION_FULL, }; static const char * const tsa_strings[] = { [TSA_MITIGATION_NONE] = "Vulnerable", [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", }; static enum tsa_mitigations tsa_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE; static int __init tsa_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "off")) tsa_mitigation = TSA_MITIGATION_NONE; else if (!strcmp(str, "on")) tsa_mitigation = TSA_MITIGATION_FULL; else if (!strcmp(str, "user")) tsa_mitigation = TSA_MITIGATION_USER_KERNEL; else if (!strcmp(str, "vm")) tsa_mitigation = TSA_MITIGATION_VM; else pr_err("Ignoring unknown tsa=%s option.\n", str); return 0; } early_param("tsa", tsa_parse_cmdline); static void __init tsa_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_TSA)) { tsa_mitigation = TSA_MITIGATION_NONE; return; } if (tsa_mitigation == TSA_MITIGATION_AUTO) { bool vm = false, uk = false; tsa_mitigation = TSA_MITIGATION_NONE; if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) { tsa_mitigation = TSA_MITIGATION_USER_KERNEL; uk = true; } if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { tsa_mitigation = TSA_MITIGATION_VM; vm = true; } if (uk && vm) tsa_mitigation = TSA_MITIGATION_FULL; } if (tsa_mitigation == TSA_MITIGATION_NONE) return; if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; /* * No need to set verw_clear_cpu_buf_mitigation_selected - it * doesn't fit all cases here and it is not needed because this * is the only VERW-based mitigation on AMD. 
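 *
 * Example: "tsa=vm" sets only X86_FEATURE_CLEAR_CPU_BUF_VM, clearing
 * buffers before VM entry while leaving the user/kernel boundary
 * unmitigated; "tsa=on" sets both capability bits (see
 * tsa_apply_mitigation() below).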
*/ pr_info("%s\n", tsa_strings[tsa_mitigation]); } static void __init tsa_apply_mitigation(void) { switch (tsa_mitigation) { case TSA_MITIGATION_USER_KERNEL: setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); break; case TSA_MITIGATION_VM: setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); break; case TSA_MITIGATION_FULL: setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); break; default: break; } } #undef pr_fmt #define pr_fmt(fmt) "Spectre V2 : " fmt static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = SPECTRE_V2_USER_NONE; static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = SPECTRE_V2_USER_NONE; #ifdef CONFIG_MITIGATION_RETPOLINE static bool spectre_v2_bad_module; bool retpoline_module_ok(bool has_retpoline) { if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) return true; pr_err("System may be vulnerable to spectre v2\n"); spectre_v2_bad_module = true; return false; } static inline const char *spectre_v2_module_string(void) { return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; } #else static inline const char *spectre_v2_module_string(void) { return ""; } #endif #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" #ifdef CONFIG_BPF_SYSCALL void unpriv_ebpf_notify(int new_state) { if (new_state) return; /* Unprivileged eBPF is enabled */ switch (spectre_v2_enabled) { case SPECTRE_V2_EIBRS: pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); break; case SPECTRE_V2_EIBRS_LFENCE: if (sched_smt_active()) pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); break; default: break; } } #endif /* The kernel command line selection for spectre v2 */ enum spectre_v2_mitigation_cmd { SPECTRE_V2_CMD_NONE, SPECTRE_V2_CMD_AUTO, SPECTRE_V2_CMD_FORCE, SPECTRE_V2_CMD_RETPOLINE, SPECTRE_V2_CMD_RETPOLINE_GENERIC, SPECTRE_V2_CMD_RETPOLINE_LFENCE, SPECTRE_V2_CMD_EIBRS, SPECTRE_V2_CMD_EIBRS_RETPOLINE, SPECTRE_V2_CMD_EIBRS_LFENCE, SPECTRE_V2_CMD_IBRS, }; static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE; enum spectre_v2_user_mitigation_cmd { SPECTRE_V2_USER_CMD_NONE, SPECTRE_V2_USER_CMD_AUTO, SPECTRE_V2_USER_CMD_FORCE, SPECTRE_V2_USER_CMD_PRCTL, SPECTRE_V2_USER_CMD_PRCTL_IBPB, SPECTRE_V2_USER_CMD_SECCOMP, SPECTRE_V2_USER_CMD_SECCOMP_IBPB, }; static enum spectre_v2_user_mitigation_cmd spectre_v2_user_cmd __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? 
SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE; static const char * const spectre_v2_user_strings[] = { [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", }; static int __init spectre_v2_user_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "auto")) spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_AUTO; else if (!strcmp(str, "off")) spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_NONE; else if (!strcmp(str, "on")) spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_FORCE; else if (!strcmp(str, "prctl")) spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL; else if (!strcmp(str, "prctl,ibpb")) spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL_IBPB; else if (!strcmp(str, "seccomp")) spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP; else if (!strcmp(str, "seccomp,ibpb")) spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP_IBPB; else pr_err("Ignoring unknown spectre_v2_user option (%s).", str); return 0; } early_param("spectre_v2_user", spectre_v2_user_parse_cmdline); static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) { return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; } static void __init spectre_v2_user_select_mitigation(void) { if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) return; switch (spectre_v2_user_cmd) { case SPECTRE_V2_USER_CMD_NONE: return; case SPECTRE_V2_USER_CMD_FORCE: spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; break; case SPECTRE_V2_USER_CMD_AUTO: if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER)) break; spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; if (smt_mitigations == SMT_MITIGATIONS_OFF) break; spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; break; case SPECTRE_V2_USER_CMD_PRCTL: spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; break; case SPECTRE_V2_USER_CMD_PRCTL_IBPB: spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; break; case SPECTRE_V2_USER_CMD_SECCOMP: if (IS_ENABLED(CONFIG_SECCOMP)) spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP; else spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; spectre_v2_user_stibp = spectre_v2_user_ibpb; break; case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; if (IS_ENABLED(CONFIG_SECCOMP)) spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP; else spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; break; } /* * At this point, an STIBP mode other than "off" has been set. * If STIBP support is not being forced, check if STIBP always-on * is preferred. 
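* (X86_FEATURE_AMD_STIBP_ALWAYS_ON advertises that the CPU prefers STIBP to
* stay set permanently rather than being toggled on context switch, which is
* why the conditional prctl/seccomp modes are upgraded to STRICT_PREFERRED
* in the check below.)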
*/ if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) && boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; if (!boot_cpu_has(X86_FEATURE_IBPB)) spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; if (!boot_cpu_has(X86_FEATURE_STIBP)) spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; } static void __init spectre_v2_user_update_mitigation(void) { if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) return; /* The spectre_v2 cmd line can override spectre_v2_user options */ if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) { spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) { spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; } /* * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP * is not required. * * Intel's Enhanced IBRS also protects against cross-thread branch target * injection in user-mode as the IBRS bit remains always set which * implicitly enables cross-thread protections. However, in legacy IBRS * mode, the IBRS bit is set only on kernel entry and cleared on return * to userspace. AMD Automatic IBRS also does not protect userspace. * These modes therefore disable the implicit cross-thread protection, * so allow for STIBP to be selected in those cases. */ if (!boot_cpu_has(X86_FEATURE_STIBP) || !cpu_smt_possible() || (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && !boot_cpu_has(X86_FEATURE_AUTOIBRS))) { spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; return; } if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE && (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) { if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT && spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED) pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; } pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]); } static void __init spectre_v2_user_apply_mitigation(void) { /* Initialize Indirect Branch Prediction Barrier */ if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) { static_branch_enable(&switch_vcpu_ibpb); switch (spectre_v2_user_ibpb) { case SPECTRE_V2_USER_STRICT: static_branch_enable(&switch_mm_always_ibpb); break; case SPECTRE_V2_USER_PRCTL: case SPECTRE_V2_USER_SECCOMP: static_branch_enable(&switch_mm_cond_ibpb); break; default: break; } pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", static_key_enabled(&switch_mm_always_ibpb) ? 
"always-on" : "conditional"); } } static const char * const spectre_v2_strings[] = { [SPECTRE_V2_NONE] = "Vulnerable", [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", [SPECTRE_V2_LFENCE] = "Vulnerable: LFENCE", [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", [SPECTRE_V2_IBRS] = "Mitigation: IBRS", }; static bool nospectre_v2 __ro_after_init; static int __init nospectre_v2_parse_cmdline(char *str) { nospectre_v2 = true; spectre_v2_cmd = SPECTRE_V2_CMD_NONE; return 0; } early_param("nospectre_v2", nospectre_v2_parse_cmdline); static int __init spectre_v2_parse_cmdline(char *str) { if (!str) return -EINVAL; if (nospectre_v2) return 0; if (!strcmp(str, "off")) { spectre_v2_cmd = SPECTRE_V2_CMD_NONE; } else if (!strcmp(str, "on")) { spectre_v2_cmd = SPECTRE_V2_CMD_FORCE; setup_force_cpu_bug(X86_BUG_SPECTRE_V2); setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER); } else if (!strcmp(str, "retpoline")) { spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE; } else if (!strcmp(str, "retpoline,amd") || !strcmp(str, "retpoline,lfence")) { spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_LFENCE; } else if (!strcmp(str, "retpoline,generic")) { spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_GENERIC; } else if (!strcmp(str, "eibrs")) { spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS; } else if (!strcmp(str, "eibrs,lfence")) { spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_LFENCE; } else if (!strcmp(str, "eibrs,retpoline")) { spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_RETPOLINE; } else if (!strcmp(str, "auto")) { spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; } else if (!strcmp(str, "ibrs")) { spectre_v2_cmd = SPECTRE_V2_CMD_IBRS; } else { pr_err("Ignoring unknown spectre_v2 option (%s).", str); } return 0; } early_param("spectre_v2", spectre_v2_parse_cmdline); static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) { if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { pr_err("Kernel not compiled with retpoline; no mitigation available!"); return SPECTRE_V2_NONE; } return SPECTRE_V2_RETPOLINE; } static bool __ro_after_init rrsba_disabled; /* Disable in-kernel use of non-RSB RET predictors */ static void __init spec_ctrl_disable_kernel_rrsba(void) { if (rrsba_disabled) return; if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) { rrsba_disabled = true; return; } if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) return; x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; update_spec_ctrl(x86_spec_ctrl_base); rrsba_disabled = true; } static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode) { /* * WARNING! There are many subtleties to consider when changing *any* * code related to RSB-related mitigations. Before doing so, carefully * read the following document, and update if necessary: * * Documentation/admin-guide/hw-vuln/rsb.rst * * In an overly simplified nutshell: * * - User->user RSB attacks are conditionally mitigated during * context switches by cond_mitigation -> write_ibpb(). * * - User->kernel and guest->host attacks are mitigated by eIBRS or * RSB filling. * * Though, depending on config, note that other alternative * mitigations may end up getting used instead, e.g., IBPB on * entry/vmexit, call depth tracking, or return thunks. 
*/ switch (mode) { case SPECTRE_V2_NONE: break; case SPECTRE_V2_EIBRS: case SPECTRE_V2_EIBRS_LFENCE: case SPECTRE_V2_EIBRS_RETPOLINE: if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); } break; case SPECTRE_V2_RETPOLINE: case SPECTRE_V2_LFENCE: case SPECTRE_V2_IBRS: pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n"); setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); break; default: pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n"); dump_stack(); break; } } /* * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by * branch history in userspace. Not needed if BHI_NO is set. */ static bool __init spec_ctrl_bhi_dis(void) { if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) return false; x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S; update_spec_ctrl(x86_spec_ctrl_base); setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW); return true; } enum bhi_mitigations { BHI_MITIGATION_OFF, BHI_MITIGATION_AUTO, BHI_MITIGATION_ON, BHI_MITIGATION_VMEXIT_ONLY, }; static enum bhi_mitigations bhi_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF; static int __init spectre_bhi_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "off")) bhi_mitigation = BHI_MITIGATION_OFF; else if (!strcmp(str, "on")) bhi_mitigation = BHI_MITIGATION_ON; else if (!strcmp(str, "vmexit")) bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; else pr_err("Ignoring unknown spectre_bhi option (%s)", str); return 0; } early_param("spectre_bhi", spectre_bhi_parse_cmdline); static void __init bhi_select_mitigation(void) { if (!boot_cpu_has(X86_BUG_BHI)) bhi_mitigation = BHI_MITIGATION_OFF; if (bhi_mitigation != BHI_MITIGATION_AUTO) return; if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) { if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL)) bhi_mitigation = BHI_MITIGATION_ON; else bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; } else { bhi_mitigation = BHI_MITIGATION_OFF; } } static void __init bhi_update_mitigation(void) { if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) bhi_mitigation = BHI_MITIGATION_OFF; } static void __init bhi_apply_mitigation(void) { if (bhi_mitigation == BHI_MITIGATION_OFF) return; /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ if (boot_cpu_has(X86_FEATURE_RETPOLINE) && !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { spec_ctrl_disable_kernel_rrsba(); if (rrsba_disabled) return; } if (!IS_ENABLED(CONFIG_X86_64)) return; /* Mitigate in hardware if supported */ if (spec_ctrl_bhi_dis()) return; if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) { pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n"); setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); return; } pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n"); setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP); setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); } static void __init spectre_v2_select_mitigation(void) { if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE || spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { pr_err("RETPOLINE selected but not compiled in. 
Switching to AUTO select\n"); spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; } if ((spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS || spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { pr_err("EIBRS selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n"); spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; } if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { pr_err("LFENCE selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n"); spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; } if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) { pr_err("IBRS selected but not compiled in. Switching to AUTO select\n"); spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; } if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { pr_err("IBRS selected but not Intel CPU. Switching to AUTO select\n"); spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; } if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { pr_err("IBRS selected but CPU doesn't have IBRS. Switching to AUTO select\n"); spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; } if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { pr_err("IBRS selected but running as XenPV guest. Switching to AUTO select\n"); spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; } if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) { spectre_v2_cmd = SPECTRE_V2_CMD_NONE; return; } switch (spectre_v2_cmd) { case SPECTRE_V2_CMD_NONE: return; case SPECTRE_V2_CMD_AUTO: if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2)) break; fallthrough; case SPECTRE_V2_CMD_FORCE: if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { spectre_v2_enabled = SPECTRE_V2_EIBRS; break; } spectre_v2_enabled = spectre_v2_select_retpoline(); break; case SPECTRE_V2_CMD_RETPOLINE_LFENCE: pr_err(SPECTRE_V2_LFENCE_MSG); spectre_v2_enabled = SPECTRE_V2_LFENCE; break; case SPECTRE_V2_CMD_RETPOLINE_GENERIC: spectre_v2_enabled = SPECTRE_V2_RETPOLINE; break; case SPECTRE_V2_CMD_RETPOLINE: spectre_v2_enabled = spectre_v2_select_retpoline(); break; case SPECTRE_V2_CMD_IBRS: spectre_v2_enabled = SPECTRE_V2_IBRS; break; case SPECTRE_V2_CMD_EIBRS: spectre_v2_enabled = SPECTRE_V2_EIBRS; break; case SPECTRE_V2_CMD_EIBRS_LFENCE: spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE; break; case SPECTRE_V2_CMD_EIBRS_RETPOLINE: spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE; break; } } static void __init spectre_v2_update_mitigation(void) { if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO && !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) { if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && boot_cpu_has_bug(X86_BUG_RETBLEED) && retbleed_mitigation != RETBLEED_MITIGATION_NONE && retbleed_mitigation != RETBLEED_MITIGATION_STUFF && boot_cpu_has(X86_FEATURE_IBRS) && boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { spectre_v2_enabled = SPECTRE_V2_IBRS; } } if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]); } static void __init spectre_v2_apply_mitigation(void) { if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); } else { x86_spec_ctrl_base |= SPEC_CTRL_IBRS; update_spec_ctrl(x86_spec_ctrl_base); } } switch (spectre_v2_enabled) { case 
SPECTRE_V2_NONE: return; case SPECTRE_V2_EIBRS: break; case SPECTRE_V2_IBRS: setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) pr_warn(SPECTRE_V2_IBRS_PERF_MSG); break; case SPECTRE_V2_LFENCE: case SPECTRE_V2_EIBRS_LFENCE: setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); fallthrough; case SPECTRE_V2_RETPOLINE: case SPECTRE_V2_EIBRS_RETPOLINE: setup_force_cpu_cap(X86_FEATURE_RETPOLINE); break; } /* * Disable alternate RSB predictions in kernel when indirect CALLs and * JMPs gets protection against BHI and Intramode-BTI, but RET * prediction from a non-RSB predictor is still a risk. */ if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE || spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE || spectre_v2_enabled == SPECTRE_V2_RETPOLINE) spec_ctrl_disable_kernel_rrsba(); spectre_v2_select_rsb_mitigation(spectre_v2_enabled); /* * Retpoline protects the kernel, but doesn't protect firmware. IBRS * and Enhanced IBRS protect firmware too, so enable IBRS around * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't * otherwise enabled. * * Use "spectre_v2_enabled" to check Enhanced IBRS instead of * boot_cpu_has(), because the user might select retpoline on the kernel * command line and if the CPU supports Enhanced IBRS, kernel might * un-intentionally not enable IBRS around firmware calls. */ if (boot_cpu_has_bug(X86_BUG_RETBLEED) && boot_cpu_has(X86_FEATURE_IBPB) && (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); pr_info("Enabling Speculation Barrier for firmware calls\n"); } } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } } static void update_stibp_msr(void * __unused) { u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); update_spec_ctrl(val); } /* Update x86_spec_ctrl_base in case SMT state changed. */ static void update_stibp_strict(void) { u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; if (sched_smt_active()) mask |= SPEC_CTRL_STIBP; if (mask == x86_spec_ctrl_base) return; pr_info("Update user space SMT mitigation: STIBP %s\n", mask & SPEC_CTRL_STIBP ? "always-on" : "off"); x86_spec_ctrl_base = mask; on_each_cpu(update_stibp_msr, NULL, 1); } /* Update the static key controlling the evaluation of TIF_SPEC_IB */ static void update_indir_branch_cond(void) { if (sched_smt_active()) static_branch_enable(&switch_to_cond_stibp); else static_branch_disable(&switch_to_cond_stibp); } #undef pr_fmt #define pr_fmt(fmt) fmt /* Update the static key controlling the MDS CPU buffer clear in idle */ static void update_mds_branch_idle(void) { /* * Enable the idle clearing if SMT is active on CPUs which are * affected only by MSBDS and not any other MDS variant. * * The other variants cannot be mitigated when SMT is enabled, so * clearing the buffers on idle just to prevent the Store Buffer * repartitioning leak would be a window dressing exercise. 
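* Put differently: idle-time clearing only buys anything on MSBDS-only
* parts, where the store buffer is statically partitioned between SMT
* siblings and its halves are recombined when a sibling goes idle.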
*/ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) return; if (sched_smt_active()) { static_branch_enable(&cpu_buf_idle_clear); } else if (mmio_mitigation == MMIO_MITIGATION_OFF || (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { static_branch_disable(&cpu_buf_idle_clear); } } #undef pr_fmt #define pr_fmt(fmt) "Speculative Store Bypass: " fmt static enum ssb_mitigation ssb_mode __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_SSB) ? SPEC_STORE_BYPASS_AUTO : SPEC_STORE_BYPASS_NONE; static const char * const ssb_strings[] = { [SPEC_STORE_BYPASS_NONE] = "Vulnerable", [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", }; static bool nossb __ro_after_init; static int __init nossb_parse_cmdline(char *str) { nossb = true; ssb_mode = SPEC_STORE_BYPASS_NONE; return 0; } early_param("nospec_store_bypass_disable", nossb_parse_cmdline); static int __init ssb_parse_cmdline(char *str) { if (!str) return -EINVAL; if (nossb) return 0; if (!strcmp(str, "auto")) ssb_mode = SPEC_STORE_BYPASS_AUTO; else if (!strcmp(str, "on")) ssb_mode = SPEC_STORE_BYPASS_DISABLE; else if (!strcmp(str, "off")) ssb_mode = SPEC_STORE_BYPASS_NONE; else if (!strcmp(str, "prctl")) ssb_mode = SPEC_STORE_BYPASS_PRCTL; else if (!strcmp(str, "seccomp")) ssb_mode = IS_ENABLED(CONFIG_SECCOMP) ? SPEC_STORE_BYPASS_SECCOMP : SPEC_STORE_BYPASS_PRCTL; else pr_err("Ignoring unknown spec_store_bypass_disable option (%s).\n", str); return 0; } early_param("spec_store_bypass_disable", ssb_parse_cmdline); static void __init ssb_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) { ssb_mode = SPEC_STORE_BYPASS_NONE; return; } if (ssb_mode == SPEC_STORE_BYPASS_AUTO) { if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS)) ssb_mode = SPEC_STORE_BYPASS_PRCTL; else ssb_mode = SPEC_STORE_BYPASS_NONE; } if (!boot_cpu_has(X86_FEATURE_SSBD)) ssb_mode = SPEC_STORE_BYPASS_NONE; pr_info("%s\n", ssb_strings[ssb_mode]); } static void __init ssb_apply_mitigation(void) { /* * We have three CPU feature flags that are in play here: * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation */ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) { setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); /* * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may * use a completely different MSR and bit dependent on family. */ if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && !static_cpu_has(X86_FEATURE_AMD_SSBD)) { x86_amd_ssb_disable(); } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; update_spec_ctrl(x86_spec_ctrl_base); } } } #undef pr_fmt #define pr_fmt(fmt) "Speculation prctl: " fmt static void task_update_spec_tif(struct task_struct *tsk) { /* Force the update of the real TIF bits */ set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); /* * Immediately update the speculation control MSRs for the current * task, but for a non-current task delay setting the CPU * mitigation until it is scheduled next. * * This can only happen for SECCOMP mitigation. For PRCTL it's * always the current task. 
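* A non-current task picks the new state up via TIF_SPEC_FORCE_UPDATE the
* next time it is scheduled in, so no IPI is required here.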
*/ if (tsk == current) speculation_ctrl_update_current(); } static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) { if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) return -EPERM; switch (ctrl) { case PR_SPEC_ENABLE: set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); return 0; case PR_SPEC_DISABLE: clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); return 0; default: return -ERANGE; } } static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) { if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && ssb_mode != SPEC_STORE_BYPASS_SECCOMP) return -ENXIO; switch (ctrl) { case PR_SPEC_ENABLE: /* If speculation is force disabled, enable is not allowed */ if (task_spec_ssb_force_disable(task)) return -EPERM; task_clear_spec_ssb_disable(task); task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_DISABLE: task_set_spec_ssb_disable(task); task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_FORCE_DISABLE: task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_DISABLE_NOEXEC: if (task_spec_ssb_force_disable(task)) return -EPERM; task_set_spec_ssb_disable(task); task_set_spec_ssb_noexec(task); task_update_spec_tif(task); break; default: return -ERANGE; } return 0; } static bool is_spec_ib_user_controlled(void) { return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; } static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) { switch (ctrl) { case PR_SPEC_ENABLE: if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return 0; /* * With strict mode for both IBPB and STIBP, the instruction * code paths avoid checking this task flag and instead, * unconditionally run the instruction. However, STIBP and IBPB * are independent and either can be set to conditionally * enabled regardless of the mode of the other. * * If either is set to conditional, allow the task flag to be * updated, unless it was force-disabled by a previous prctl * call. Currently, this is possible on an AMD CPU which has the * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the * kernel is booted with 'spectre_v2_user=seccomp', then * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. */ if (!is_spec_ib_user_controlled() || task_spec_ib_force_disable(task)) return -EPERM; task_clear_spec_ib_disable(task); task_update_spec_tif(task); break; case PR_SPEC_DISABLE: case PR_SPEC_FORCE_DISABLE: /* * Indirect branch speculation is always allowed when * mitigation is force disabled. 
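* From userspace this path is reached with, e.g.:
*
*	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
*	      PR_SPEC_DISABLE, 0, 0);
*
* which fails with -EPERM below when neither IBPB nor STIBP provides a
* user-controllable mitigation that could be disabled.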
*/ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return -EPERM; if (!is_spec_ib_user_controlled()) return 0; task_set_spec_ib_disable(task); if (ctrl == PR_SPEC_FORCE_DISABLE) task_set_spec_ib_force_disable(task); task_update_spec_tif(task); if (task == current) indirect_branch_prediction_barrier(); break; default: return -ERANGE; } return 0; } int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, unsigned long ctrl) { switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_set(task, ctrl); case PR_SPEC_INDIRECT_BRANCH: return ib_prctl_set(task, ctrl); case PR_SPEC_L1D_FLUSH: return l1d_flush_prctl_set(task, ctrl); default: return -ENODEV; } } #ifdef CONFIG_SECCOMP void arch_seccomp_spec_mitigate(struct task_struct *task) { if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); } #endif static int l1d_flush_prctl_get(struct task_struct *task) { if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) return PR_SPEC_FORCE_DISABLE; if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) return PR_SPEC_PRCTL | PR_SPEC_ENABLE; else return PR_SPEC_PRCTL | PR_SPEC_DISABLE; } static int ssb_prctl_get(struct task_struct *task) { switch (ssb_mode) { case SPEC_STORE_BYPASS_NONE: if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) return PR_SPEC_ENABLE; return PR_SPEC_NOT_AFFECTED; case SPEC_STORE_BYPASS_DISABLE: return PR_SPEC_DISABLE; case SPEC_STORE_BYPASS_SECCOMP: case SPEC_STORE_BYPASS_PRCTL: case SPEC_STORE_BYPASS_AUTO: if (task_spec_ssb_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; if (task_spec_ssb_noexec(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; if (task_spec_ssb_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; } BUG(); } static int ib_prctl_get(struct task_struct *task) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return PR_SPEC_NOT_AFFECTED; if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return PR_SPEC_ENABLE; else if (is_spec_ib_user_controlled()) { if (task_spec_ib_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; if (task_spec_ib_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) return PR_SPEC_DISABLE; else return PR_SPEC_NOT_AFFECTED; } int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) { switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_get(task); case PR_SPEC_INDIRECT_BRANCH: return ib_prctl_get(task); case PR_SPEC_L1D_FLUSH: return l1d_flush_prctl_get(task); default: return -ENODEV; } } void x86_spec_ctrl_setup_ap(void) { if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) update_spec_ctrl(x86_spec_ctrl_base); if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) x86_amd_ssb_disable(); } bool itlb_multihit_kvm_mitigation; EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); #undef pr_fmt #define pr_fmt(fmt) "L1TF: " fmt /* Default mitigation for L1TF-affected CPUs */ enum l1tf_mitigations l1tf_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_L1TF) ? 
L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF; #if IS_ENABLED(CONFIG_KVM_INTEL) EXPORT_SYMBOL_GPL(l1tf_mitigation); #endif enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); /* * These CPUs all support 44bits physical address space internally in the * cache but CPUID can report a smaller number of physical address bits. * * The L1TF mitigation uses the top most address bit for the inversion of * non present PTEs. When the installed memory reaches into the top most * address bit due to memory holes, which has been observed on machines * which report 36bits physical address bits and have 32G RAM installed, * then the mitigation range check in l1tf_select_mitigation() triggers. * This is a false positive because the mitigation is still possible due to * the fact that the cache uses 44bit internally. Use the cache bits * instead of the reported physical bits and adjust them on the affected * machines to 44bit if the reported bits are less than 44. */ static void override_cache_bits(struct cpuinfo_x86 *c) { if (c->x86 != 6) return; switch (c->x86_vfm) { case INTEL_NEHALEM: case INTEL_WESTMERE: case INTEL_SANDYBRIDGE: case INTEL_IVYBRIDGE: case INTEL_HASWELL: case INTEL_HASWELL_L: case INTEL_HASWELL_G: case INTEL_BROADWELL: case INTEL_BROADWELL_G: case INTEL_SKYLAKE_L: case INTEL_SKYLAKE: case INTEL_KABYLAKE_L: case INTEL_KABYLAKE: if (c->x86_cache_bits < 44) c->x86_cache_bits = 44; break; } } static void __init l1tf_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_L1TF)) { l1tf_mitigation = L1TF_MITIGATION_OFF; return; } if (l1tf_mitigation != L1TF_MITIGATION_AUTO) return; if (!should_mitigate_vuln(X86_BUG_L1TF)) { l1tf_mitigation = L1TF_MITIGATION_OFF; return; } if (smt_mitigations == SMT_MITIGATIONS_ON) l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; else l1tf_mitigation = L1TF_MITIGATION_FLUSH; } static void __init l1tf_apply_mitigation(void) { u64 half_pa; if (!boot_cpu_has_bug(X86_BUG_L1TF)) return; override_cache_bits(&boot_cpu_data); switch (l1tf_mitigation) { case L1TF_MITIGATION_OFF: case L1TF_MITIGATION_FLUSH_NOWARN: case L1TF_MITIGATION_FLUSH: case L1TF_MITIGATION_AUTO: break; case L1TF_MITIGATION_FLUSH_NOSMT: case L1TF_MITIGATION_FULL: cpu_smt_disable(false); break; case L1TF_MITIGATION_FULL_FORCE: cpu_smt_disable(true); break; } #if CONFIG_PGTABLE_LEVELS == 2 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); return; #endif half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; if (l1tf_mitigation != L1TF_MITIGATION_OFF && e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { pr_warn("System has more than MAX_PA/2 memory. 
L1TF mitigation not effective.\n"); pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", half_pa); pr_info("However, doing so will make a part of your RAM unusable.\n"); pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); return; } setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); } static int __init l1tf_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_L1TF)) return 0; if (!str) return -EINVAL; if (!strcmp(str, "off")) l1tf_mitigation = L1TF_MITIGATION_OFF; else if (!strcmp(str, "flush,nowarn")) l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; else if (!strcmp(str, "flush")) l1tf_mitigation = L1TF_MITIGATION_FLUSH; else if (!strcmp(str, "flush,nosmt")) l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; else if (!strcmp(str, "full")) l1tf_mitigation = L1TF_MITIGATION_FULL; else if (!strcmp(str, "full,force")) l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; return 0; } early_param("l1tf", l1tf_cmdline); #undef pr_fmt #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt static const char * const srso_strings[] = { [SRSO_MITIGATION_NONE] = "Vulnerable", [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode", [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET", [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled", [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET", [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only", [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation" }; static int __init srso_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "off")) srso_mitigation = SRSO_MITIGATION_NONE; else if (!strcmp(str, "microcode")) srso_mitigation = SRSO_MITIGATION_MICROCODE; else if (!strcmp(str, "safe-ret")) srso_mitigation = SRSO_MITIGATION_SAFE_RET; else if (!strcmp(str, "ibpb")) srso_mitigation = SRSO_MITIGATION_IBPB; else if (!strcmp(str, "ibpb-vmexit")) srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; else pr_err("Ignoring unknown SRSO option (%s).", str); return 0; } early_param("spec_rstack_overflow", srso_parse_cmdline); #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." static void __init srso_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_SRSO)) { srso_mitigation = SRSO_MITIGATION_NONE; return; } if (srso_mitigation == SRSO_MITIGATION_AUTO) { /* * Use safe-RET if user->kernel or guest->host protection is * required. Otherwise the 'microcode' mitigation is sufficient * to protect the user->user and guest->guest vectors. */ if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) && !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) { srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { srso_mitigation = SRSO_MITIGATION_MICROCODE; } else { srso_mitigation = SRSO_MITIGATION_NONE; return; } } /* Zen1/2 with SMT off aren't vulnerable to SRSO. 
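* (Exploitation there appears to require SMT to be active, hence the
* family < 0x19 && !cpu_smt_possible() check below reporting
* SRSO_MITIGATION_NOSMT.)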
*/ if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { srso_mitigation = SRSO_MITIGATION_NOSMT; return; } if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) { pr_warn("IBPB-extending microcode not applied!\n"); pr_warn(SRSO_NOTICE); /* * Safe-RET provides partial mitigation without microcode, but * other mitigations require microcode to provide any * mitigations. */ if (srso_mitigation == SRSO_MITIGATION_SAFE_RET) srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; else srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; } switch (srso_mitigation) { case SRSO_MITIGATION_SAFE_RET: case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) { srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; goto ibpb_on_vmexit; } if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) { pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); srso_mitigation = SRSO_MITIGATION_NONE; } break; ibpb_on_vmexit: case SRSO_MITIGATION_IBPB_ON_VMEXIT: if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n"); srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE; break; } fallthrough; case SRSO_MITIGATION_IBPB: if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); srso_mitigation = SRSO_MITIGATION_NONE; } break; default: break; } } static void __init srso_update_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_SRSO)) return; /* If retbleed is using IBPB, that works for SRSO as well */ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) srso_mitigation = SRSO_MITIGATION_IBPB; pr_info("%s\n", srso_strings[srso_mitigation]); } static void __init srso_apply_mitigation(void) { /* * Clear the feature flag if this mitigation is not selected as that * feature flag controls the BpSpecReduce MSR bit toggling in KVM. */ if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE); if (srso_mitigation == SRSO_MITIGATION_NONE) { if (boot_cpu_has(X86_FEATURE_SBPB)) x86_pred_cmd = PRED_CMD_SBPB; return; } switch (srso_mitigation) { case SRSO_MITIGATION_SAFE_RET: case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: /* * Enable the return thunk for generated code * like ftrace, static_call, etc. */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); set_return_thunk(srso_alias_return_thunk); } else { setup_force_cpu_cap(X86_FEATURE_SRSO); set_return_thunk(srso_return_thunk); } break; case SRSO_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); /* * IBPB on entry already obviates the need for * software-based untraining so clear those in case some * other mitigation like Retbleed has selected them. */ setup_clear_cpu_cap(X86_FEATURE_UNRET); setup_clear_cpu_cap(X86_FEATURE_RETHUNK); fallthrough; case SRSO_MITIGATION_IBPB_ON_VMEXIT: setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); /* * There is no need for RSB filling: entry_ibpb() ensures * all predictions, including the RSB, are invalidated, * regardless of IBPB implementation. 
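* That is also why X86_FEATURE_RSB_VMEXIT is cleared right below: an IBPB
* issued on VM exit supersedes the RSB stuffing sequence.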
*/ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); break; default: break; } } #undef pr_fmt #define pr_fmt(fmt) "VMSCAPE: " fmt enum vmscape_mitigations { VMSCAPE_MITIGATION_NONE, VMSCAPE_MITIGATION_AUTO, VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER, VMSCAPE_MITIGATION_IBPB_ON_VMEXIT, }; static const char * const vmscape_strings[] = { [VMSCAPE_MITIGATION_NONE] = "Vulnerable", /* [VMSCAPE_MITIGATION_AUTO] */ [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace", [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT", }; static enum vmscape_mitigations vmscape_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE; static int __init vmscape_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "off")) { vmscape_mitigation = VMSCAPE_MITIGATION_NONE; } else if (!strcmp(str, "ibpb")) { vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; } else if (!strcmp(str, "force")) { setup_force_cpu_bug(X86_BUG_VMSCAPE); vmscape_mitigation = VMSCAPE_MITIGATION_AUTO; } else { pr_err("Ignoring unknown vmscape=%s option.\n", str); } return 0; } early_param("vmscape", vmscape_parse_cmdline); static void __init vmscape_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_VMSCAPE) || !boot_cpu_has(X86_FEATURE_IBPB)) { vmscape_mitigation = VMSCAPE_MITIGATION_NONE; return; } if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_VMSCAPE)) vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; else vmscape_mitigation = VMSCAPE_MITIGATION_NONE; } } static void __init vmscape_update_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_VMSCAPE)) return; if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB || srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT) vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT; pr_info("%s\n", vmscape_strings[vmscape_mitigation]); } static void __init vmscape_apply_mitigation(void) { if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER) setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER); } #undef pr_fmt #define pr_fmt(fmt) fmt #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" #define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n" void cpu_bugs_smt_update(void) { mutex_lock(&spec_ctrl_mutex); if (sched_smt_active() && unprivileged_ebpf_enabled() && spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: break; case SPECTRE_V2_USER_STRICT: case SPECTRE_V2_USER_STRICT_PREFERRED: update_stibp_strict(); break; case SPECTRE_V2_USER_PRCTL: case SPECTRE_V2_USER_SECCOMP: update_indir_branch_cond(); break; } switch (mds_mitigation) { case MDS_MITIGATION_FULL: case MDS_MITIGATION_AUTO: case MDS_MITIGATION_VMWERV: if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) pr_warn_once(MDS_MSG_SMT); update_mds_branch_idle(); break; case MDS_MITIGATION_OFF: break; } switch (taa_mitigation) { case TAA_MITIGATION_VERW: case TAA_MITIGATION_AUTO: case TAA_MITIGATION_UCODE_NEEDED: if (sched_smt_active()) pr_warn_once(TAA_MSG_SMT); break; case TAA_MITIGATION_TSX_DISABLED: case TAA_MITIGATION_OFF: break; } switch (mmio_mitigation) { case MMIO_MITIGATION_VERW: case MMIO_MITIGATION_AUTO: case MMIO_MITIGATION_UCODE_NEEDED: if (sched_smt_active()) pr_warn_once(MMIO_MSG_SMT); break; case MMIO_MITIGATION_OFF: break; } switch (tsa_mitigation) { case TSA_MITIGATION_USER_KERNEL: case TSA_MITIGATION_VM: case TSA_MITIGATION_AUTO: case TSA_MITIGATION_FULL: /* * TSA-SQ can potentially lead to info leakage between * SMT threads. */ if (sched_smt_active()) static_branch_enable(&cpu_buf_idle_clear); else static_branch_disable(&cpu_buf_idle_clear); break; case TSA_MITIGATION_NONE: case TSA_MITIGATION_UCODE_NEEDED: break; } switch (vmscape_mitigation) { case VMSCAPE_MITIGATION_NONE: case VMSCAPE_MITIGATION_AUTO: break; case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT: case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER: /* * Hypervisors can be attacked across-threads, warn for SMT when * STIBP is not already enabled system-wide. * * Intel eIBRS (!AUTOIBRS) implies STIBP on. */ if (!sched_smt_active() || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && !boot_cpu_has(X86_FEATURE_AUTOIBRS))) break; pr_warn_once(VMSCAPE_MSG_SMT); break; } mutex_unlock(&spec_ctrl_mutex); } #ifdef CONFIG_SYSFS #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" #if IS_ENABLED(CONFIG_KVM_INTEL) static const char * const l1tf_vmx_states[] = { [VMENTER_L1D_FLUSH_AUTO] = "auto", [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" }; static ssize_t l1tf_show_state(char *buf) { if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && sched_smt_active())) { return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, l1tf_vmx_states[l1tf_vmx_mitigation]); } return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, l1tf_vmx_states[l1tf_vmx_mitigation], sched_smt_active() ? 
"vulnerable" : "disabled"); } static ssize_t itlb_multihit_show_state(char *buf) { if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || !boot_cpu_has(X86_FEATURE_VMX)) return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); else if (!(cr4_read_shadow() & X86_CR4_VMXE)) return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); else if (itlb_multihit_kvm_mitigation) return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); else return sysfs_emit(buf, "KVM: Vulnerable\n"); } #else static ssize_t l1tf_show_state(char *buf) { return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); } static ssize_t itlb_multihit_show_state(char *buf) { return sysfs_emit(buf, "Processor vulnerable\n"); } #endif static ssize_t mds_show_state(char *buf) { if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { return sysfs_emit(buf, "%s; SMT Host state unknown\n", mds_strings[mds_mitigation]); } if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : sched_smt_active() ? "mitigated" : "disabled")); } return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], sched_smt_active() ? "vulnerable" : "disabled"); } static ssize_t tsx_async_abort_show_state(char *buf) { if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || (taa_mitigation == TAA_MITIGATION_OFF)) return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { return sysfs_emit(buf, "%s; SMT Host state unknown\n", taa_strings[taa_mitigation]); } return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], sched_smt_active() ? "vulnerable" : "disabled"); } static ssize_t mmio_stale_data_show_state(char *buf) { if (mmio_mitigation == MMIO_MITIGATION_OFF) return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { return sysfs_emit(buf, "%s; SMT Host state unknown\n", mmio_strings[mmio_mitigation]); } return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], sched_smt_active() ? 
"vulnerable" : "disabled"); } static ssize_t rfds_show_state(char *buf) { return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); } static ssize_t old_microcode_show_state(char *buf) { if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return sysfs_emit(buf, "Unknown: running under hypervisor"); return sysfs_emit(buf, "Vulnerable\n"); } static ssize_t its_show_state(char *buf) { return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); } static char *stibp_state(void) { if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && !boot_cpu_has(X86_FEATURE_AUTOIBRS)) return ""; switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: return "; STIBP: disabled"; case SPECTRE_V2_USER_STRICT: return "; STIBP: forced"; case SPECTRE_V2_USER_STRICT_PREFERRED: return "; STIBP: always-on"; case SPECTRE_V2_USER_PRCTL: case SPECTRE_V2_USER_SECCOMP: if (static_key_enabled(&switch_to_cond_stibp)) return "; STIBP: conditional"; } return ""; } static char *ibpb_state(void) { if (boot_cpu_has(X86_FEATURE_IBPB)) { if (static_key_enabled(&switch_mm_always_ibpb)) return "; IBPB: always-on"; if (static_key_enabled(&switch_mm_cond_ibpb)) return "; IBPB: conditional"; return "; IBPB: disabled"; } return ""; } static char *pbrsb_eibrs_state(void) { if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) return "; PBRSB-eIBRS: SW sequence"; else return "; PBRSB-eIBRS: Vulnerable"; } else { return "; PBRSB-eIBRS: Not affected"; } } static const char *spectre_bhi_state(void) { if (!boot_cpu_has_bug(X86_BUG_BHI)) return "; BHI: Not affected"; else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) return "; BHI: BHI_DIS_S"; else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) return "; BHI: SW loop, KVM: SW loop"; else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && rrsba_disabled) return "; BHI: Retpoline"; else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT)) return "; BHI: Vulnerable, KVM: SW loop"; return "; BHI: Vulnerable"; } static ssize_t spectre_v2_show_state(char *buf) { if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); if (sched_smt_active() && unprivileged_ebpf_enabled() && spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], ibpb_state(), boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "", stibp_state(), boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "", pbrsb_eibrs_state(), spectre_bhi_state(), /* this should always be at the end */ spectre_v2_module_string()); } static ssize_t srbds_show_state(char *buf) { return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); } static ssize_t retbleed_show_state(char *buf) { if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], !sched_smt_active() ? "disabled" : spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 
"enabled with STIBP protection" : "vulnerable"); } return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } static ssize_t srso_show_state(char *buf) { return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]); } static ssize_t gds_show_state(char *buf) { return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); } static ssize_t tsa_show_state(char *buf) { return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); } static ssize_t vmscape_show_state(char *buf) { return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]); } static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { if (!boot_cpu_has_bug(bug)) return sysfs_emit(buf, "Not affected\n"); switch (bug) { case X86_BUG_CPU_MELTDOWN: if (boot_cpu_has(X86_FEATURE_PTI)) return sysfs_emit(buf, "Mitigation: PTI\n"); if (hypervisor_is_type(X86_HYPER_XEN_PV)) return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); break; case X86_BUG_SPECTRE_V1: return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); case X86_BUG_SPECTRE_V2: return spectre_v2_show_state(buf); case X86_BUG_SPEC_STORE_BYPASS: return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); case X86_BUG_L1TF: if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) return l1tf_show_state(buf); break; case X86_BUG_MDS: return mds_show_state(buf); case X86_BUG_TAA: return tsx_async_abort_show_state(buf); case X86_BUG_ITLB_MULTIHIT: return itlb_multihit_show_state(buf); case X86_BUG_SRBDS: return srbds_show_state(buf); case X86_BUG_MMIO_STALE_DATA: return mmio_stale_data_show_state(buf); case X86_BUG_RETBLEED: return retbleed_show_state(buf); case X86_BUG_SRSO: return srso_show_state(buf); case X86_BUG_GDS: return gds_show_state(buf); case X86_BUG_RFDS: return rfds_show_state(buf); case X86_BUG_OLD_MICROCODE: return old_microcode_show_state(buf); case X86_BUG_ITS: return its_show_state(buf); case X86_BUG_TSA: return tsa_show_state(buf); case X86_BUG_VMSCAPE: return vmscape_show_state(buf); default: break; } return sysfs_emit(buf, "Vulnerable\n"); } ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); } ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); } ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); } ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); } ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); } ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_MDS); } ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_TAA); } ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); } ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); } ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute 
*attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); } ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); } ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); } ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_GDS); } ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); } ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE); } ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_ITS); } ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_TSA); } ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE); } #endif void __warn_thunk(void) { WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n"); }
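/*
 * Example (a minimal userspace sketch, not part of the kernel build): the
 * mitigation state selected above is consumed through two interfaces, the
 * /sys/devices/system/cpu/vulnerabilities/ files emitted by the
 * cpu_show_*() handlers in this file, and the PR_GET/PR_SET_SPECULATION_CTRL
 * prctl()s routed through arch_prctl_spec_ctrl_get()/_set(). Compiled
 * separately against a normal libc, a quick check could look like:
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");
 *
 *		if (f) {
 *			if (fgets(line, sizeof(line), f))
 *				fputs(line, stdout);	// e.g. "Mitigation: Retpolines; ..."
 *			fclose(f);
 *		}
 *
 *		// Returns a PR_SPEC_* bitmask describing the per-task SSB
 *		// state (see ssb_prctl_get() above), or -1 with errno set.
 *		printf("PR_SPEC_STORE_BYPASS state: %d\n",
 *		       (int)prctl(PR_GET_SPECULATION_CTRL,
 *				  PR_SPEC_STORE_BYPASS, 0, 0, 0));
 *		return 0;
 *	}
 */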
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * Description: Data Center Bridging netlink interface
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>

/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet. Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 * framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 * can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 * control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */
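/*
 * Illustrative sketch, not part of this file: the shape of the hook-up a
 * DCB-capable driver performs so that the handlers below can reach it.
 * The "foo_" names are hypothetical; the dcbnl_rtnl_ops callbacks and
 * struct ieee_ets come from include/net/dcbnl.h.
 */
static u8 foo_getstate(struct net_device *dev)
{
	return 1;			/* DCB enabled on this port */
}

static int foo_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	ets->ets_cap = 8;		/* advertise eight traffic classes */
	return 0;
}

static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
	.getstate	= foo_getstate,
	.ieee_getets	= foo_ieee_getets,
};

/* In the driver's probe path: netdev->dcbnl_ops = &foo_dcbnl_ops; */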
/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
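/*
 * Illustrative sketch, a hypothetical helper that is not present in this
 * file: the pattern the handlers below follow when consuming one of the
 * nested policies above -- validate the nest against its policy, then
 * pick out the per-index attributes.
 */
static int foo_parse_numtcs(struct nlattr *attr, u8 *pg_tcs)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX, attr,
					  dcbnl_numtcs_nest, NULL);
	if (err)
		return err;
	if (data[DCB_NUMTCS_ATTR_PG])
		*pg_tcs = nla_get_u8(data[DCB_NUMTCS_ATTR_PG]);
	return 0;
}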
/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]   = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]       = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]            = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]            = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE]      = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE]        = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN]            = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS]      = {.len = sizeof(struct ieee_qcn_stats)},
	[DCB_ATTR_DCB_BUFFER]          = {.len = sizeof(struct dcbnl_buffer)},
	[DCB_ATTR_DCB_APP_TRUST_TABLE] = {.type = NLA_NESTED},
};

/* DCB feature configuration nested attributes.
*/ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = { [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8}, }; static LIST_HEAD(dcb_app_list); static LIST_HEAD(dcb_rewr_list); static DEFINE_SPINLOCK(dcb_lock); static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector) { switch (selector) { case IEEE_8021QAZ_APP_SEL_ETHERTYPE: case IEEE_8021QAZ_APP_SEL_STREAM: case IEEE_8021QAZ_APP_SEL_DGRAM: case IEEE_8021QAZ_APP_SEL_ANY: case IEEE_8021QAZ_APP_SEL_DSCP: return DCB_ATTR_IEEE_APP; case DCB_APP_SEL_PCP: return DCB_ATTR_DCB_APP; default: return DCB_ATTR_IEEE_APP_UNSPEC; } } static bool dcbnl_app_attr_type_validate(enum ieee_attrs_app type) { switch (type) { case DCB_ATTR_IEEE_APP: case DCB_ATTR_DCB_APP: return true; default: return false; } } static bool dcbnl_app_selector_validate(enum ieee_attrs_app type, u8 selector) { return dcbnl_app_attr_type_get(selector) == type; } static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq, u32 flags, struct nlmsghdr **nlhp) { struct sk_buff *skb; struct dcbmsg *dcb; struct nlmsghdr *nlh; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return NULL; nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags); BUG_ON(!nlh); dcb = nlmsg_data(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = cmd; dcb->dcb_pad = 0; if (nlhp) *nlhp = nlh; return skb; } static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ if (!netdev->dcbnl_ops->getstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->getstate(netdev)); } static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_PFC_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->getpfccfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG); if (!nest) return -EMSGSIZE; if (data[DCB_PFC_UP_ATTR_ALL]) getall = 1; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (!getall && !data[i]) continue; netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } nla_nest_end(skb, nest); return 0; } static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 perm_addr[MAX_ADDR_LEN]; if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); } static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_CAP]) return -EINVAL; if (!netdev->dcbnl_ops->getcap) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], dcbnl_cap_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP); if (!nest) return 
-EMSGSIZE; if (data[DCB_CAP_ATTR_ALL]) getall = 1; for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) { if (!getall && !data[i]) continue; if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } } nla_nest_end(skb, nest); return 0; } static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_NUMTCS]) return -EINVAL; if (!netdev->dcbnl_ops->getnumtcs) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS); if (!nest) return -EMSGSIZE; if (data[DCB_NUMTCS_ATTR_ALL]) getall = 1; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (!getall && !data[i]) continue; ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); if (!ret) { ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } else return -EINVAL; } nla_nest_end(skb, nest); return 0; } static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; int ret; u8 value; int i; if (!tb[DCB_ATTR_NUMTCS]) return -EINVAL; if (!netdev->dcbnl_ops->setnumtcs) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest, NULL); if (ret) return ret; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); if (ret) break; } return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret); } static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { if (!netdev->dcbnl_ops->getpfcstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_PFC_STATE, netdev->dcbnl_ops->getpfcstate(netdev)); } static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!tb[DCB_ATTR_PFC_STATE]) return -EINVAL; if (!netdev->dcbnl_ops->setpfcstate) return -EOPNOTSUPP; value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); netdev->dcbnl_ops->setpfcstate(netdev, value); return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0); } static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *app_nest; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; u16 id; u8 up, idtype; int ret; if (!tb[DCB_ATTR_APP]) return -EINVAL; ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest, NULL); if (ret) return ret; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID])) return -EINVAL; /* either by eth type or by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) return -EINVAL; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); if (netdev->dcbnl_ops->getapp) { ret = netdev->dcbnl_ops->getapp(netdev, idtype, id); if (ret < 0) return ret; else up = ret; } else { struct dcb_app app = { .selector = idtype, .protocol = id, }; up = dcb_getapp(netdev, &app); } app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP); 
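	/*
	 * The reply mirrors the request layout: the same nested DCB_ATTR_APP
	 * carries back the idtype and id from the query plus the priority
	 * resolved above (driver getapp() callback when available, otherwise
	 * the shared dcb_app_list via dcb_getapp()).
	 */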
if (!app_nest) return -EMSGSIZE; ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype); if (ret) goto out_cancel; ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id); if (ret) goto out_cancel; ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up); if (ret) goto out_cancel; nla_nest_end(skb, app_nest); return 0; out_cancel: nla_nest_cancel(skb, app_nest); return ret; } static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { int ret; u16 id; u8 up, idtype; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; if (!tb[DCB_ATTR_APP]) return -EINVAL; ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest, NULL); if (ret) return ret; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID]) || (!app_tb[DCB_APP_ATTR_PRIORITY])) return -EINVAL; /* either by eth type or by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) return -EINVAL; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); if (netdev->dcbnl_ops->setapp) { ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up); if (ret < 0) return ret; } else { struct dcb_app app; app.selector = idtype; app.protocol = id; app.priority = up; ret = dcb_setapp(netdev, &app); } ret = nla_put_u8(skb, DCB_ATTR_APP, ret); dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0); return ret; } static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, struct nlattr **tb, struct sk_buff *skb, int dir) { struct nlattr *pg_nest, *param_nest, *data; struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; u8 prio, pgid, tc_pct, up_map; int ret; int getall = 0; int i; if (!tb[DCB_ATTR_PG_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->getpgtccfgtx || !netdev->dcbnl_ops->getpgtccfgrx || !netdev->dcbnl_ops->getpgbwgcfgtx || !netdev->dcbnl_ops->getpgbwgcfgrx) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest, NULL); if (ret) return ret; pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG); if (!pg_nest) return -EMSGSIZE; if (pg_tb[DCB_PG_ATTR_TC_ALL]) getall = 1; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!getall && !pg_tb[i]) continue; if (pg_tb[DCB_PG_ATTR_TC_ALL]) data = pg_tb[DCB_PG_ATTR_TC_ALL]; else data = pg_tb[i]; ret = nla_parse_nested_deprecated(param_tb, DCB_TC_ATTR_PARAM_MAX, data, dcbnl_tc_param_nest, NULL); if (ret) goto err_pg; param_nest = nla_nest_start_noflag(skb, i); if (!param_nest) goto err_pg; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } else { /* Tx */ netdev->dcbnl_ops->getpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } if (param_tb[DCB_TC_ATTR_PARAM_PGID] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); if (ret) goto err_param; } if 
(param_tb[DCB_TC_ATTR_PARAM_BW_PCT] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); if (ret) goto err_param; } nla_nest_end(skb, param_nest); } if (pg_tb[DCB_PG_ATTR_BW_ID_ALL]) getall = 1; else getall = 0; for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!getall && !pg_tb[i]) continue; tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } else { /* Tx */ netdev->dcbnl_ops->getpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } ret = nla_put_u8(skb, i, tc_pct); if (ret) goto err_pg; } nla_nest_end(skb, pg_nest); return 0; err_param: nla_nest_cancel(skb, param_nest); err_pg: nla_nest_cancel(skb, pg_nest); return -EMSGSIZE; } static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0); } static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1); } static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!tb[DCB_ATTR_STATE]) return -EINVAL; if (!netdev->dcbnl_ops->setstate) return -EOPNOTSUPP; value = nla_get_u8(tb[DCB_ATTR_STATE]); return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->setstate(netdev, value)); } static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1]; int i; int ret; u8 value; if (!tb[DCB_ATTR_PFC_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->setpfccfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL); if (ret) return ret; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); netdev->dcbnl_ops->setpfccfg(netdev, data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); } return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0); } static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { int ret; if (!tb[DCB_ATTR_SET_ALL]) return -EINVAL; if (!netdev->dcbnl_ops->setall) return -EOPNOTSUPP; ret = nla_put_u8(skb, DCB_ATTR_SET_ALL, netdev->dcbnl_ops->setall(netdev)); dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0); return ret; } static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb, int dir) { struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; int ret; int i; u8 pgid; u8 up_map; u8 prio; u8 tc_pct; if (!tb[DCB_ATTR_PG_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->setpgtccfgtx || !netdev->dcbnl_ops->setpgtccfgrx || !netdev->dcbnl_ops->setpgbwgcfgtx || !netdev->dcbnl_ops->setpgbwgcfgrx) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest, NULL); if (ret) return ret; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!pg_tb[i]) continue; ret = nla_parse_nested_deprecated(param_tb, DCB_TC_ATTR_PARAM_MAX, pg_tb[i], dcbnl_tc_param_nest, NULL); if (ret) return ret; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if 
(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]) prio = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]); if (param_tb[DCB_TC_ATTR_PARAM_PGID]) pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]); if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT]) tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]); if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]) up_map = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } else { /* Tx */ netdev->dcbnl_ops->setpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!pg_tb[i]) continue; tc_pct = nla_get_u8(pg_tb[i]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } else { /* Tx */ netdev->dcbnl_ops->setpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } } return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0); } static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0); } static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1); } static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *bcn_nest; struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1]; u8 value_byte; u32 value_integer; int ret; bool getall = false; int i; if (!tb[DCB_ATTR_BCN]) return -EINVAL; if (!netdev->dcbnl_ops->getbcnrp || !netdev->dcbnl_ops->getbcncfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN); if (!bcn_nest) return -EMSGSIZE; if (bcn_tb[DCB_BCN_ATTR_ALL]) getall = true; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0, &value_byte); ret = nla_put_u8(skb, i, value_byte); if (ret) goto err_bcn; } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcncfg(netdev, i, &value_integer); ret = nla_put_u32(skb, i, value_integer); if (ret) goto err_bcn; } nla_nest_end(skb, bcn_nest); return 0; err_bcn: nla_nest_cancel(skb, bcn_nest); return ret; } static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_BCN_ATTR_MAX + 1]; int i; int ret; u8 value_byte; u32 value_int; if (!tb[DCB_ATTR_BCN]) return -EINVAL; if (!netdev->dcbnl_ops->setbcncfg || !netdev->dcbnl_ops->setbcnrp) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (data[i] == NULL) continue; value_byte = nla_get_u8(data[i]); netdev->dcbnl_ops->setbcnrp(netdev, data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte); } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (data[i] == NULL) continue; value_int = nla_get_u32(data[i]); netdev->dcbnl_ops->setbcncfg(netdev, i, value_int); } return nla_put_u8(skb, DCB_ATTR_BCN, 0); } static int dcbnl_build_peer_app(struct net_device 
*netdev, struct sk_buff *skb, int app_nested_type, int app_info_type,
 int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;

	/*
	 * Retrieve the peer app configuration from the driver. If the driver
	 * handlers fail, exit without doing anything.
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc_array(app_count, sizeof(struct dcb_app),
				      GFP_KERNEL);
		if (!table)
			return -ENOMEM;
		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/*
		 * Build the message; from here on the only possible failure
		 * is due to the skb size.
		 */
		err = -EMSGSIZE;

		app = nla_nest_start_noflag(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type,
				    sizeof(struct dcb_app), &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}

static int dcbnl_getapptrust(struct net_device *netdev, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	enum ieee_attrs_app type;
	struct nlattr *apptrust;
	int nselectors, err, i;
	u8 *selectors;

	selectors = kzalloc(IEEE_8021QAZ_APP_SEL_MAX + 1, GFP_KERNEL);
	if (!selectors)
		return -ENOMEM;

	err = ops->dcbnl_getapptrust(netdev, selectors, &nselectors);
	if (err) {
		err = 0;
		goto out;
	}

	apptrust = nla_nest_start(skb, DCB_ATTR_DCB_APP_TRUST_TABLE);
	if (!apptrust) {
		err = -EMSGSIZE;
		goto out;
	}

	for (i = 0; i < nselectors; i++) {
		type = dcbnl_app_attr_type_get(selectors[i]);
		err = nla_put_u8(skb, type, selectors[i]);
		if (err) {
			nla_nest_cancel(skb, apptrust);
			goto out;
		}
	}
	nla_nest_end(skb, apptrust);

out:
	kfree(selectors);
	return err;
}

/* Set or delete APP table or rewrite table entries. The APP struct is
 * validated and the appropriate callback function is called.
 */
static int dcbnl_app_table_setdel(struct nlattr *attr,
				  struct net_device *netdev,
				  int (*setdel)(struct net_device *dev,
						struct dcb_app *app))
{
	struct dcb_app *app_data;
	enum ieee_attrs_app type;
	struct nlattr *attr_itr;
	int rem, err;

	nla_for_each_nested(attr_itr, attr, rem) {
		type = nla_type(attr_itr);

		if (!dcbnl_app_attr_type_validate(type))
			continue;

		if (nla_len(attr_itr) < sizeof(struct dcb_app))
			return -ERANGE;

		app_data = nla_data(attr_itr);

		if (!dcbnl_app_selector_validate(type, app_data->selector))
			return -EINVAL;

		err = setdel(netdev, app_data);
		if (err)
			return err;
	}

	return 0;
}

/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands.
*/ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee, *app, *rewr; struct dcb_app_type *itr; int dcbx; int err; if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) return -EMSGSIZE; ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE); if (!ieee) return -EMSGSIZE; if (ops->ieee_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) return -EMSGSIZE; } if (ops->ieee_getmaxrate) { struct ieee_maxrate maxrate; memset(&maxrate, 0, sizeof(maxrate)); err = ops->ieee_getmaxrate(netdev, &maxrate); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, sizeof(maxrate), &maxrate); if (err) return -EMSGSIZE; } } if (ops->ieee_getqcn) { struct ieee_qcn qcn; memset(&qcn, 0, sizeof(qcn)); err = ops->ieee_getqcn(netdev, &qcn); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_QCN, sizeof(qcn), &qcn); if (err) return -EMSGSIZE; } } if (ops->ieee_getqcnstats) { struct ieee_qcn_stats qcn_stats; memset(&qcn_stats, 0, sizeof(qcn_stats)); err = ops->ieee_getqcnstats(netdev, &qcn_stats); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS, sizeof(qcn_stats), &qcn_stats); if (err) return -EMSGSIZE; } } if (ops->ieee_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) return -EMSGSIZE; } if (ops->dcbnl_getbuffer) { struct dcbnl_buffer buffer; memset(&buffer, 0, sizeof(buffer)); err = ops->dcbnl_getbuffer(netdev, &buffer); if (!err && nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer)) return -EMSGSIZE; } app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE); if (!app) return -EMSGSIZE; spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { enum ieee_attrs_app type = dcbnl_app_attr_type_get(itr->app.selector); err = nla_put(skb, type, sizeof(itr->app), &itr->app); if (err) { spin_unlock_bh(&dcb_lock); return -EMSGSIZE; } } } if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock_bh(&dcb_lock); nla_nest_end(skb, app); rewr = nla_nest_start(skb, DCB_ATTR_DCB_REWR_TABLE); if (!rewr) return -EMSGSIZE; spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_rewr_list, list) { if (itr->ifindex == netdev->ifindex) { enum ieee_attrs_app type = dcbnl_app_attr_type_get(itr->app.selector); err = nla_put(skb, type, sizeof(itr->app), &itr->app); if (err) { spin_unlock_bh(&dcb_lock); nla_nest_cancel(skb, rewr); return -EMSGSIZE; } } } spin_unlock_bh(&dcb_lock); nla_nest_end(skb, rewr); if (ops->dcbnl_getapptrust) { err = dcbnl_getapptrust(netdev, skb); if (err) return err; } /* get peer info if available */ if (ops->ieee_peer_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) return -EMSGSIZE; } if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) return -EMSGSIZE; } if (ops->peer_getappinfo && ops->peer_getapptable) { err = dcbnl_build_peer_app(netdev, skb, DCB_ATTR_IEEE_PEER_APP, DCB_ATTR_IEEE_APP_UNSPEC, DCB_ATTR_IEEE_APP); if (err) return -EMSGSIZE; } nla_nest_end(skb, ieee); if (dcbx >= 0) { err = nla_put_u8(skb, DCB_ATTR_DCBX, 
dcbx); if (err) return -EMSGSIZE; } return 0; } static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, int dir) { u8 pgid, up_map, prio, tc_pct; const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG; struct nlattr *pg = nla_nest_start_noflag(skb, i); if (!pg) return -EMSGSIZE; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { struct nlattr *tc_nest = nla_nest_start_noflag(skb, i); if (!tc_nest) return -EMSGSIZE; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); else ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct)) return -EMSGSIZE; nla_nest_end(skb, tc_nest); } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); else ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); if (nla_put_u8(skb, i, tc_pct)) return -EMSGSIZE; } nla_nest_end(skb, pg); return 0; } static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) { struct nlattr *cee, *app; struct dcb_app_type *itr; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int dcbx, i, err = -EMSGSIZE; u8 value; if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) goto nla_put_failure; cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE); if (!cee) goto nla_put_failure; /* local pg */ if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) { err = dcbnl_cee_pg_fill(skb, netdev, 1); if (err) goto nla_put_failure; } if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) { err = dcbnl_cee_pg_fill(skb, netdev, 0); if (err) goto nla_put_failure; } /* local pfc */ if (ops->getpfccfg) { struct nlattr *pfc_nest = nla_nest_start_noflag(skb, DCB_ATTR_CEE_PFC); if (!pfc_nest) goto nla_put_failure; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); if (nla_put_u8(skb, i, value)) goto nla_put_failure; } nla_nest_end(skb, pfc_nest); } /* local app */ spin_lock_bh(&dcb_lock); app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE); if (!app) goto dcb_unlock; list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { struct nlattr *app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP); if (!app_nest) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, itr->app.selector); if (err) goto dcb_unlock; err = nla_put_u16(skb, DCB_APP_ATTR_ID, itr->app.protocol); if (err) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, itr->app.priority); if (err) goto dcb_unlock; nla_nest_end(skb, app_nest); } } nla_nest_end(skb, app); if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock_bh(&dcb_lock); /* features flags */ if (ops->getfeatcfg) { struct nlattr *feat = nla_nest_start_noflag(skb, DCB_ATTR_CEE_FEAT); if (!feat) goto nla_put_failure; for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) if (!ops->getfeatcfg(netdev, i, &value) && nla_put_u8(skb, i, value)) goto nla_put_failure; nla_nest_end(skb, feat); } /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; memset(&pg, 0, 
sizeof(pg)); err = ops->cee_peer_getpg(netdev, &pg); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) goto nla_put_failure; } if (ops->cee_peer_getpfc) { struct cee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->cee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) goto nla_put_failure; } if (ops->peer_getappinfo && ops->peer_getapptable) { err = dcbnl_build_peer_app(netdev, skb, DCB_ATTR_CEE_PEER_APP_TABLE, DCB_ATTR_CEE_PEER_APP_INFO, DCB_ATTR_CEE_PEER_APP); if (err) goto nla_put_failure; } nla_nest_end(skb, cee); /* DCBX state */ if (dcbx >= 0) { err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); if (err) goto nla_put_failure; } return 0; dcb_unlock: spin_unlock_bh(&dcb_lock); nla_put_failure: err = -EMSGSIZE; return err; } static int dcbnl_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 portid, int dcbx_ver) { struct net *net = dev_net(dev); struct sk_buff *skb; struct nlmsghdr *nlh; const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; int err; if (!ops) return -EOPNOTSUPP; skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh); if (!skb) return -ENOMEM; if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE) err = dcbnl_ieee_fill(skb, dev); else err = dcbnl_cee_fill(skb, dev); if (err < 0) { /* Report error to broadcast listeners */ nlmsg_free(skb); rtnl_set_sk_err(net, RTNLGRP_DCB, err); } else { /* End nlmsg and notify broadcast listeners */ nlmsg_end(skb, nlh); rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL); } return err; } int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 portid) { return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE); } EXPORT_SYMBOL(dcbnl_ieee_notify); int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 portid) { return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE); } EXPORT_SYMBOL(dcbnl_cee_notify); /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands. * If any requested operation can not be completed * the entire msg is aborted and error value is returned. * No attempt is made to reconcile the case where only part of the * cmd can be completed. 
*/ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; int prio; int err; if (!ops) return -EOPNOTSUPP; if (!tb[DCB_ATTR_IEEE]) return -EINVAL; err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], dcbnl_ieee_policy, NULL); if (err) return err; if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) { struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]); err = ops->ieee_setets(netdev, ets); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) { struct ieee_maxrate *maxrate = nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]); err = ops->ieee_setmaxrate(netdev, maxrate); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) { struct ieee_qcn *qcn = nla_data(ieee[DCB_ATTR_IEEE_QCN]); err = ops->ieee_setqcn(netdev, qcn); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); err = ops->ieee_setpfc(netdev, pfc); if (err) goto err; } if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) { struct dcbnl_buffer *buffer = nla_data(ieee[DCB_ATTR_DCB_BUFFER]); for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) { if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) { err = -EINVAL; goto err; } } err = ops->dcbnl_setbuffer(netdev, buffer); if (err) goto err; } if (ieee[DCB_ATTR_DCB_REWR_TABLE]) { err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE], netdev, ops->dcbnl_setrewr ?: dcb_setrewr); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE], netdev, ops->ieee_setapp ?: dcb_ieee_setapp); if (err) goto err; } if (ieee[DCB_ATTR_DCB_APP_TRUST_TABLE]) { u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1] = {0}; struct nlattr *attr; int nselectors = 0; int rem; if (!ops->dcbnl_setapptrust) { err = -EOPNOTSUPP; goto err; } nla_for_each_nested(attr, ieee[DCB_ATTR_DCB_APP_TRUST_TABLE], rem) { enum ieee_attrs_app type = nla_type(attr); u8 selector; int i; if (!dcbnl_app_attr_type_validate(type) || nla_len(attr) != 1 || nselectors >= sizeof(selectors)) { err = -EINVAL; goto err; } selector = nla_get_u8(attr); if (!dcbnl_app_selector_validate(type, selector)) { err = -EINVAL; goto err; } /* Duplicate selector ? 
*/ for (i = 0; i < nselectors; i++) { if (selectors[i] == selector) { err = -EINVAL; goto err; } } selectors[nselectors++] = selector; } err = ops->dcbnl_setapptrust(netdev, selectors, nselectors); if (err) goto err; } err: err = nla_put_u8(skb, DCB_ATTR_IEEE, err); dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0); return err; } static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; if (!ops) return -EOPNOTSUPP; return dcbnl_ieee_fill(skb, netdev); } static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; int err; if (!ops) return -EOPNOTSUPP; if (!tb[DCB_ATTR_IEEE]) return -EINVAL; err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], dcbnl_ieee_policy, NULL); if (err) return err; if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE], netdev, ops->ieee_delapp ?: dcb_ieee_delapp); if (err) goto err; } if (ieee[DCB_ATTR_DCB_REWR_TABLE]) { err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE], netdev, ops->dcbnl_delrewr ?: dcb_delrewr); if (err) goto err; } err: err = nla_put_u8(skb, DCB_ATTR_IEEE, err); dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0); return err; } /* DCBX configuration */ static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { if (!netdev->dcbnl_ops->getdcbx) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_DCBX, netdev->dcbnl_ops->getdcbx(netdev)); } static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!netdev->dcbnl_ops->setdcbx) return -EOPNOTSUPP; if (!tb[DCB_ATTR_DCBX]) return -EINVAL; value = nla_get_u8(tb[DCB_ATTR_DCBX]); return nla_put_u8(skb, DCB_ATTR_DCBX, netdev->dcbnl_ops->setdcbx(netdev, value)); } static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest; u8 value; int ret, i; int getall = 0; if (!netdev->dcbnl_ops->getfeatcfg) return -EOPNOTSUPP; if (!tb[DCB_ATTR_FEATCFG]) return -EINVAL; ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG); if (!nest) return -EMSGSIZE; if (data[DCB_FEATCFG_ATTR_ALL]) getall = 1; for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { if (!getall && !data[i]) continue; ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value); if (!ret) ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); goto nla_put_failure; } } nla_nest_end(skb, nest); nla_put_failure: return ret; } static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1]; int ret, i; u8 value; if (!netdev->dcbnl_ops->setfeatcfg) return -ENOTSUPP; if (!tb[DCB_ATTR_FEATCFG]) return -EINVAL; ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL); if (ret) goto err; for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { if (data[i] == NULL) continue; value = 
nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value); if (ret) goto err; } err: ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret); return ret; } /* Handle CEE DCBX GET commands. */ static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; if (!ops) return -EOPNOTSUPP; return dcbnl_cee_fill(skb, netdev); } struct reply_func { /* reply netlink message type */ int type; /* function to fill message contents */ int (*cb)(struct net_device *, struct nlmsghdr *, u32, struct nlattr **, struct sk_buff *); }; static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = { [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate }, [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate }, [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg }, [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg }, [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr }, [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap }, [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs }, [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs }, [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate }, [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate }, [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp }, [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp }, [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg }, [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg }, [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg }, [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg }, [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall }, [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg }, [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg }, [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get }, [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set }, [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del }, [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx }, [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx }, [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg }, [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg }, [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get }, }; static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net_device *netdev; struct dcbmsg *dcb = nlmsg_data(nlh); struct nlattr *tb[DCB_ATTR_MAX + 1]; u32 portid = NETLINK_CB(skb).portid; int ret = -EINVAL; struct sk_buff *reply_skb; struct nlmsghdr *reply_nlh = NULL; const struct reply_func *fn; if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, dcbnl_rtnl_policy, extack); if (ret < 0) return ret; if (dcb->cmd > DCB_CMD_MAX) return -EINVAL; /* check if a reply function has been defined for the command */ fn = &reply_funcs[dcb->cmd]; if (!fn->cb) return -EOPNOTSUPP; if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!tb[DCB_ATTR_IFNAME]) return -EINVAL; netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME])); if (!netdev) return -ENODEV; if (!netdev->dcbnl_ops) return -EOPNOTSUPP; reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq, nlh->nlmsg_flags, &reply_nlh); if (!reply_skb) return -ENOMEM; ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb); if (ret < 0) { nlmsg_free(reply_skb); goto out; } nlmsg_end(reply_skb, reply_nlh); ret = rtnl_unicast(reply_skb, net, portid); out: return ret; } static struct dcb_app_type 
*dcb_rewr_lookup(const struct dcb_app *app, int ifindex, int proto)
{
	struct dcb_app_type *itr;

	list_for_each_entry(itr, &dcb_rewr_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.priority == app->priority &&
		    itr->ifindex == ifindex &&
		    ((proto == -1) || itr->app.protocol == proto))
			return itr;
	}

	return NULL;
}

static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
					   int ifindex, int prio)
{
	struct dcb_app_type *itr;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == ifindex &&
		    ((prio == -1) || itr->app.priority == prio))
			return itr;
	}

	return NULL;
}

static int dcb_app_add(struct list_head *list, const struct dcb_app *app,
		       int ifindex)
{
	struct dcb_app_type *entry;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	memcpy(&entry->app, app, sizeof(*app));
	entry->ifindex = ifindex;
	list_add(&entry->list, list);

	return 0;
}

/**
 * dcb_getapp - retrieve the DCBX application user priority
 * @dev: network interface
 * @app: application to get user priority of
 *
 * On success returns a non-zero 802.1p user priority bitmap,
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, -1);
	if (itr)
		prio = itr->app.priority;
	spin_unlock_bh(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_getapp);

/**
 * dcb_setapp - add CEE dcb application data to app list
 * @dev: network interface
 * @new: application data to add
 *
 * Priority 0 is an invalid priority in CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero. Priority is expected to be an 8-bit 802.1p user
 * priority bitmap.
 */
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and replace */
	itr = dcb_app_lookup(new, dev->ifindex, -1);
	if (itr) {
		if (new->priority)
			itr->app.priority = new->priority;
		else {
			list_del(&itr->list);
			kfree(itr);
		}
		goto out;
	}
	/* App type does not exist; add new application type */
	if (new->priority)
		err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);

	return err;
}
EXPORT_SYMBOL(dcb_setapp);

/**
 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
 * @dev: network interface
 * @app: where to store the retrieved application data
 *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap, otherwise returns 0 to indicate the dcb_app was
 * not found in the APP list.
 */
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, -1);
	if (itr)
		prio |= 1 << itr->app.priority;
	spin_unlock_bh(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);

/* Get protocol value from rewrite entry.
*/ u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app) { struct dcb_app_type *itr; u16 proto = 0; spin_lock_bh(&dcb_lock); itr = dcb_rewr_lookup(app, dev->ifindex, -1); if (itr) proto = itr->app.protocol; spin_unlock_bh(&dcb_lock); return proto; } EXPORT_SYMBOL(dcb_getrewr); /* Add rewrite entry to the rewrite list. */ int dcb_setrewr(struct net_device *dev, struct dcb_app *new) { int err; spin_lock_bh(&dcb_lock); /* Search for existing match and abort if found. */ if (dcb_rewr_lookup(new, dev->ifindex, new->protocol)) { err = -EEXIST; goto out; } err = dcb_app_add(&dcb_rewr_list, new, dev->ifindex); out: spin_unlock_bh(&dcb_lock); return err; } EXPORT_SYMBOL(dcb_setrewr); /* Delete rewrite entry from the rewrite list. */ int dcb_delrewr(struct net_device *dev, struct dcb_app *del) { struct dcb_app_type *itr; int err = -ENOENT; spin_lock_bh(&dcb_lock); /* Search for existing match and remove it. */ itr = dcb_rewr_lookup(del, dev->ifindex, del->protocol); if (itr) { list_del(&itr->list); kfree(itr); err = 0; } spin_unlock_bh(&dcb_lock); return err; } EXPORT_SYMBOL(dcb_delrewr); /** * dcb_ieee_setapp - add IEEE dcb application data to app list * @dev: network interface * @new: application data to add * * This adds application data to the list. Multiple application * entries may exist for the same selector and protocol as long * as the priorities are different. Priority is expected to be a * 3-bit unsigned integer. */ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) { struct dcb_app_type event; int err = 0; event.ifindex = dev->ifindex; memcpy(&event.app, new, sizeof(event.app)); if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); spin_lock_bh(&dcb_lock); /* Search for existing match and abort if found */ if (dcb_app_lookup(new, dev->ifindex, new->priority)) { err = -EEXIST; goto out; } err = dcb_app_add(&dcb_app_list, new, dev->ifindex); out: spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; } EXPORT_SYMBOL(dcb_ieee_setapp); /** * dcb_ieee_delapp - delete IEEE dcb application data from list * @dev: network interface * @del: application data to delete * * This removes matching APP data from the APP list. */ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) { struct dcb_app_type *itr; struct dcb_app_type event; int err = -ENOENT; event.ifindex = dev->ifindex; memcpy(&event.app, del, sizeof(event.app)); if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); spin_lock_bh(&dcb_lock); /* Search for existing match and remove it. */ if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { list_del(&itr->list); kfree(itr); err = 0; } spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; } EXPORT_SYMBOL(dcb_ieee_delapp); /* dcb_getrewr_prio_pcp_mask_map - For a given device, find mapping from * priorities to the PCP and DEI values assigned to that priority.
*/ void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev, struct dcb_rewr_prio_pcp_map *p_map) { int ifindex = dev->ifindex; struct dcb_app_type *itr; u8 prio; memset(p_map->map, 0, sizeof(p_map->map)); spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_rewr_list, list) { if (itr->ifindex == ifindex && itr->app.selector == DCB_APP_SEL_PCP && itr->app.protocol < 16 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) { prio = itr->app.priority; p_map->map[prio] |= 1 << itr->app.protocol; } } spin_unlock_bh(&dcb_lock); } EXPORT_SYMBOL(dcb_getrewr_prio_pcp_mask_map); /* dcb_getrewr_prio_dscp_mask_map - For a given device, find mapping from * priorities to the DSCP values assigned to that priority. */ void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev, struct dcb_ieee_app_prio_map *p_map) { int ifindex = dev->ifindex; struct dcb_app_type *itr; u8 prio; memset(p_map->map, 0, sizeof(p_map->map)); spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_rewr_list, list) { if (itr->ifindex == ifindex && itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP && itr->app.protocol < 64 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) { prio = itr->app.priority; p_map->map[prio] |= 1ULL << itr->app.protocol; } } spin_unlock_bh(&dcb_lock); } EXPORT_SYMBOL(dcb_getrewr_prio_dscp_mask_map); /* * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from * priorities to the DSCP values assigned to that priority. Initialize p_map * such that each map element holds a bit mask of DSCP values configured for * that priority by APP entries. */ void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev, struct dcb_ieee_app_prio_map *p_map) { int ifindex = dev->ifindex; struct dcb_app_type *itr; u8 prio; memset(p_map->map, 0, sizeof(p_map->map)); spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == ifindex && itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP && itr->app.protocol < 64 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) { prio = itr->app.priority; p_map->map[prio] |= 1ULL << itr->app.protocol; } } spin_unlock_bh(&dcb_lock); } EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map); /* * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from * DSCP values to the priorities assigned to that DSCP value. Initialize p_map * such that each map element holds a bit mask of priorities configured for a * given DSCP value by APP entries. */ void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev, struct dcb_ieee_app_dscp_map *p_map) { int ifindex = dev->ifindex; struct dcb_app_type *itr; memset(p_map->map, 0, sizeof(p_map->map)); spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == ifindex && itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP && itr->app.protocol < 64 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) p_map->map[itr->app.protocol] |= 1 << itr->app.priority; } spin_unlock_bh(&dcb_lock); } EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map); /* * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet * type, with valid PID values >= 1536. A special meaning is then assigned to * protocol value of 0: "default priority. For use when priority is not * otherwise specified". * * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default * priorities set by these entries. 
*/ u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev) { int ifindex = dev->ifindex; struct dcb_app_type *itr; u8 mask = 0; spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == ifindex && itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && itr->app.protocol == 0 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) mask |= 1 << itr->app.priority; } spin_unlock_bh(&dcb_lock); return mask; } EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask); static void dcbnl_flush_dev(struct net_device *dev) { struct dcb_app_type *itr, *tmp; spin_lock_bh(&dcb_lock); list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) { if (itr->ifindex == dev->ifindex) { list_del(&itr->list); kfree(itr); } } spin_unlock_bh(&dcb_lock); } static int dcbnl_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_UNREGISTER: if (!dev->dcbnl_ops) return NOTIFY_DONE; dcbnl_flush_dev(dev); return NOTIFY_OK; default: return NOTIFY_DONE; } } static struct notifier_block dcbnl_nb __read_mostly = { .notifier_call = dcbnl_netdevice_event, }; static const struct rtnl_msg_handler dcbnl_rtnl_msg_handlers[] __initconst = { {.msgtype = RTM_GETDCB, .doit = dcb_doit}, {.msgtype = RTM_SETDCB, .doit = dcb_doit}, }; static int __init dcbnl_init(void) { int err; err = register_netdevice_notifier(&dcbnl_nb); if (err) return err; rtnl_register_many(dcbnl_rtnl_msg_handlers); return 0; } device_initcall(dcbnl_init);
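To make the shared APP-table API above concrete, here is a minimal caller-side sketch (not part of dcbnl.c; the function name, device, and the DSCP/priority values are illustrative assumptions) of installing an IEEE APP entry with dcb_ieee_setapp() and reading back its priority bitmap with dcb_ieee_getapp_mask():

#include <linux/netdevice.h>
#include <linux/printk.h>
#include <net/dcbnl.h>

static int example_map_dscp_to_prio(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = 46,		/* DSCP EF -- example value */
		.priority = 5,		/* 3-bit priority */
	};
	int err;

	/* Fails with -EEXIST if the same {selector, protocol, priority} exists */
	err = dcb_ieee_setapp(dev, &app);
	if (err)
		return err;

	/* Bit 5 of the returned bitmap is now set for this entry */
	pr_info("prio mask for DSCP 46: %#x\n", dcb_ieee_getapp_mask(dev, &app));
	return 0;
}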
// SPDX-License-Identifier: GPL-2.0 #include <linux/fanotify.h> #include <linux/fsnotify_backend.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> /* UINT_MAX */ #include <linux/mount.h> #include <linux/sched.h> #include <linux/sched/user.h> #include <linux/sched/signal.h> #include <linux/types.h> #include <linux/wait.h> #include <linux/audit.h> #include <linux/sched/mm.h> #include <linux/statfs.h> #include <linux/stringhash.h> #include "fanotify.h" static bool fanotify_path_equal(const struct path *p1, const struct path *p2) { return p1->mnt == p2->mnt && p1->dentry == p2->dentry; } static unsigned int fanotify_hash_path(const struct path *path) { return hash_ptr(path->dentry, FANOTIFY_EVENT_HASH_BITS) ^ hash_ptr(path->mnt, FANOTIFY_EVENT_HASH_BITS); } static unsigned int fanotify_hash_fsid(__kernel_fsid_t *fsid) { return hash_32(fsid->val[0], FANOTIFY_EVENT_HASH_BITS) ^ hash_32(fsid->val[1], FANOTIFY_EVENT_HASH_BITS); } static bool fanotify_fh_equal(struct fanotify_fh *fh1, struct fanotify_fh *fh2) { if (fh1->type != fh2->type || fh1->len != fh2->len) return false; return !fh1->len || !memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len); } static unsigned int fanotify_hash_fh(struct fanotify_fh *fh) { long salt = (long)fh->type | (long)fh->len << 8; /* * full_name_hash() works long by long, so it handles fh buf optimally.
*/ return full_name_hash((void *)salt, fanotify_fh_buf(fh), fh->len); } static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1, struct fanotify_fid_event *ffe2) { /* Do not merge fid events without object fh */ if (!ffe1->object_fh.len) return false; return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) && fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh); } static bool fanotify_info_equal(struct fanotify_info *info1, struct fanotify_info *info2) { if (info1->dir_fh_totlen != info2->dir_fh_totlen || info1->dir2_fh_totlen != info2->dir2_fh_totlen || info1->file_fh_totlen != info2->file_fh_totlen || info1->name_len != info2->name_len || info1->name2_len != info2->name2_len) return false; if (info1->dir_fh_totlen && !fanotify_fh_equal(fanotify_info_dir_fh(info1), fanotify_info_dir_fh(info2))) return false; if (info1->dir2_fh_totlen && !fanotify_fh_equal(fanotify_info_dir2_fh(info1), fanotify_info_dir2_fh(info2))) return false; if (info1->file_fh_totlen && !fanotify_fh_equal(fanotify_info_file_fh(info1), fanotify_info_file_fh(info2))) return false; if (info1->name_len && memcmp(fanotify_info_name(info1), fanotify_info_name(info2), info1->name_len)) return false; return !info1->name2_len || !memcmp(fanotify_info_name2(info1), fanotify_info_name2(info2), info1->name2_len); } static bool fanotify_name_event_equal(struct fanotify_name_event *fne1, struct fanotify_name_event *fne2) { struct fanotify_info *info1 = &fne1->info; struct fanotify_info *info2 = &fne2->info; /* Do not merge name events without dir fh */ if (!info1->dir_fh_totlen) return false; if (!fanotify_fsid_equal(&fne1->fsid, &fne2->fsid)) return false; return fanotify_info_equal(info1, info2); } static bool fanotify_error_event_equal(struct fanotify_error_event *fee1, struct fanotify_error_event *fee2) { /* Error events against the same file system are always merged. */ if (!fanotify_fsid_equal(&fee1->fsid, &fee2->fsid)) return false; return true; } static bool fanotify_should_merge(struct fanotify_event *old, struct fanotify_event *new) { pr_debug("%s: old=%p new=%p\n", __func__, old, new); if (old->hash != new->hash || old->type != new->type || old->pid != new->pid) return false; /* * We want to merge many dirent events in the same dir (i.e. * creates/unlinks/renames), but we do not want to merge dirent * events referring to subdirs with dirent events referring to * non subdirs, otherwise, user won't be able to tell from a * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+ * unlink pair or rmdir+create pair of events. */ if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR)) return false; /* * FAN_RENAME event is reported with special info record types, * so we cannot merge it with other events. */ if ((old->mask & FAN_RENAME) != (new->mask & FAN_RENAME)) return false; switch (old->type) { case FANOTIFY_EVENT_TYPE_PATH: return fanotify_path_equal(fanotify_event_path(old), fanotify_event_path(new)); case FANOTIFY_EVENT_TYPE_FID: return fanotify_fid_event_equal(FANOTIFY_FE(old), FANOTIFY_FE(new)); case FANOTIFY_EVENT_TYPE_FID_NAME: return fanotify_name_event_equal(FANOTIFY_NE(old), FANOTIFY_NE(new)); case FANOTIFY_EVENT_TYPE_FS_ERROR: return fanotify_error_event_equal(FANOTIFY_EE(old), FANOTIFY_EE(new)); case FANOTIFY_EVENT_TYPE_MNT: return false; default: WARN_ON_ONCE(1); } return false; } /* Limit event merges to limit CPU overhead per event */ #define FANOTIFY_MAX_MERGE_EVENTS 128 /* and the list better be locked by something too! 
*/ static int fanotify_merge(struct fsnotify_group *group, struct fsnotify_event *event) { struct fanotify_event *old, *new = FANOTIFY_E(event); unsigned int bucket = fanotify_event_hash_bucket(group, new); struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket]; int i = 0; pr_debug("%s: group=%p event=%p bucket=%u\n", __func__, group, event, bucket); /* * Don't merge a permission event with any other event so that we know * the event structure we have created in fanotify_handle_event() is the * one we should check for permission response. */ if (fanotify_is_perm_event(new->mask)) return 0; hlist_for_each_entry(old, hlist, merge_list) { if (++i > FANOTIFY_MAX_MERGE_EVENTS) break; if (fanotify_should_merge(old, new)) { old->mask |= new->mask; if (fanotify_is_error_event(old->mask)) FANOTIFY_EE(old)->err_count++; return 1; } } return 0; } /* * Wait for response to permission event. The function also takes care of * freeing the permission event (or offloads that in case the wait is canceled * by a signal). The function returns 0 in case access got allowed by userspace, * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case * the wait got interrupted by a signal. */ static int fanotify_get_response(struct fsnotify_group *group, struct fanotify_perm_event *event, struct fsnotify_iter_info *iter_info) { int ret, errno; pr_debug("%s: group=%p event=%p\n", __func__, group, event); ret = wait_event_state(group->fanotify_data.access_waitq, event->state == FAN_EVENT_ANSWERED, (TASK_KILLABLE|TASK_FREEZABLE)); /* Signal pending? */ if (ret < 0) { spin_lock(&group->notification_lock); /* Event reported to userspace and no answer yet? */ if (event->state == FAN_EVENT_REPORTED) { /* Event will get freed once userspace answers to it */ event->state = FAN_EVENT_CANCELED; spin_unlock(&group->notification_lock); return ret; } /* Event not yet reported? Just remove it. */ if (event->state == FAN_EVENT_INIT) { fsnotify_remove_queued_event(group, &event->fae.fse); /* Permission events are not supposed to be hashed */ WARN_ON_ONCE(!hlist_unhashed(&event->fae.merge_list)); } /* * Event may be also answered in case signal delivery raced * with wakeup. In that case we have nothing to do besides * freeing the event and reporting error. */ spin_unlock(&group->notification_lock); goto out; } /* userspace responded, convert to something usable */ switch (event->response & FANOTIFY_RESPONSE_ACCESS) { case FAN_ALLOW: ret = 0; break; case FAN_DENY: /* Check custom errno from pre-content events */ errno = fanotify_get_response_errno(event->response); if (errno) { ret = -errno; break; } fallthrough; default: ret = -EPERM; } /* Check if the response should be audited */ if (event->response & FAN_AUDIT) { u32 response = event->response & (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS); audit_fanotify(response & ~FAN_AUDIT, &event->audit_rule); } pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__, group, event, ret); out: fsnotify_destroy_event(group, &event->fae.fse); return ret; } /* * This function returns a mask for an event that only contains the flags * that have been specifically requested by the user. Flags that may have * been included within the event mask, but have not been explicitly * requested by the user, will not be present in the returned mask. 
*/ static u32 fanotify_group_event_mask(struct fsnotify_group *group, struct fsnotify_iter_info *iter_info, u32 *match_mask, u32 event_mask, const void *data, int data_type, struct inode *dir) { __u32 marks_mask = 0, marks_ignore_mask = 0; __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS | FANOTIFY_EVENT_FLAGS; const struct path *path = fsnotify_data_path(data, data_type); unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); struct fsnotify_mark *mark; bool ondir = event_mask & FAN_ONDIR; int type; pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n", __func__, iter_info->report_mask, event_mask, data, data_type); if (FAN_GROUP_FLAG(group, FAN_REPORT_MNT)) { if (data_type != FSNOTIFY_EVENT_MNT) return 0; } else if (!fid_mode) { /* Do we have path to open a file descriptor? */ if (!path) return 0; /* Path type events are only relevant for files and dirs */ if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry)) return 0; } else if (!(fid_mode & FAN_REPORT_FID)) { /* Do we have a directory inode to report? */ if (!dir && !ondir) return 0; } fsnotify_foreach_iter_mark_type(iter_info, mark, type) { /* * Apply ignore mask depending on event flags in ignore mask. */ marks_ignore_mask |= fsnotify_effective_ignore_mask(mark, ondir, type); /* * Send the event depending on event flags in mark mask. */ if (!fsnotify_mask_applicable(mark->mask, ondir, type)) continue; marks_mask |= mark->mask; /* Record the mark types of this group that matched the event */ *match_mask |= 1U << type; } test_mask = event_mask & marks_mask & ~marks_ignore_mask; /* * For dirent modification events (create/delete/move) that do not carry * the child entry name information, we report FAN_ONDIR for mkdir/rmdir * so user can differentiate them from creat/unlink. * * For backward compatibility and consistency, do not report FAN_ONDIR * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR * to user in fid mode for all event types. * * We never report FAN_EVENT_ON_CHILD to user, but we do pass it in to * fanotify_alloc_event() when group is reporting fid as indication * that event happened on child. */ if (fid_mode) { /* Do not report event flags without any event */ if (!(test_mask & ~FANOTIFY_EVENT_FLAGS)) return 0; } else { user_mask &= ~FANOTIFY_EVENT_FLAGS; } return test_mask & user_mask; } /* * Check size needed to encode fanotify_fh. * * Return size of encoded fh without fanotify_fh header. * Return 0 on failure to encode. */ static int fanotify_encode_fh_len(struct inode *inode) { int dwords = 0; int fh_len; if (!inode) return 0; exportfs_encode_fid(inode, NULL, &dwords); fh_len = dwords << 2; /* * struct fanotify_error_event might be preallocated and is * limited to MAX_HANDLE_SZ. This should never happen, but * safeguard by forcing an invalid file handle. */ if (WARN_ON_ONCE(fh_len > MAX_HANDLE_SZ)) return 0; return fh_len; } /* * Encode fanotify_fh. * * Return total size of encoded fh including fanotify_fh header. * Return 0 on failure to encode. */ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode, unsigned int fh_len, unsigned int *hash, gfp_t gfp) { int dwords, type = 0; char *ext_buf = NULL; void *buf = fh + 1; int err; fh->type = FILEID_ROOT; fh->len = 0; fh->flags = 0; /* * Invalid FHs are used by FAN_FS_ERROR for errors not * linked to any inode. The f_handle won't be reported * back to userspace. */ if (!inode) goto out; /* * !gfp means preallocated variable size fh, but fh_len could * be zero in that case if encoding fh len failed.
*/ err = -ENOENT; if (fh_len < 4 || WARN_ON_ONCE(fh_len % 4) || fh_len > MAX_HANDLE_SZ) goto out_err; /* No external buffer in a variable size allocated fh */ if (gfp && fh_len > FANOTIFY_INLINE_FH_LEN) { /* Treat failure to allocate fh as failure to encode fh */ err = -ENOMEM; ext_buf = kmalloc(fh_len, gfp); if (!ext_buf) goto out_err; *fanotify_fh_ext_buf_ptr(fh) = ext_buf; buf = ext_buf; fh->flags |= FANOTIFY_FH_FLAG_EXT_BUF; } dwords = fh_len >> 2; type = exportfs_encode_fid(inode, buf, &dwords); err = -EINVAL; /* * Unlike file_handle, type and len of struct fanotify_fh are u8. * Traditionally, filesystems return handle_type < 0xff, but there * is no enforcement for that in vfs. */ BUILD_BUG_ON(MAX_HANDLE_SZ > 0xff || FILEID_INVALID > 0xff); if (type <= 0 || type >= FILEID_INVALID || fh_len != dwords << 2) goto out_err; fh->type = type; fh->len = fh_len; out: /* * Mix fh into event merge key. Hash might be NULL in case of * unhashed FID events (i.e. FAN_FS_ERROR). */ if (hash) *hash ^= fanotify_hash_fh(fh); return FANOTIFY_FH_HDR_LEN + fh_len; out_err: pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n", type, fh_len, err); kfree(ext_buf); *fanotify_fh_ext_buf_ptr(fh) = NULL; /* Report the event without a file identifier on encode error */ fh->type = FILEID_INVALID; fh->len = 0; return 0; } /* * FAN_REPORT_FID is ambiguous in that it reports the fid of the child for * some events and the fid of the parent for create/delete/move events. * * With the FAN_REPORT_TARGET_FID flag, the fid of the child is reported * also in create/delete/move events in addition to the fid of the parent * and the name of the child. */ static inline bool fanotify_report_child_fid(unsigned int fid_mode, u32 mask) { if (mask & ALL_FSNOTIFY_DIRENT_EVENTS) return (fid_mode & FAN_REPORT_TARGET_FID); return (fid_mode & FAN_REPORT_FID) && !(mask & FAN_ONDIR); } /* * The inode to use as identifier when reporting fid depends on the event * and the group flags. * * With the group flag FAN_REPORT_TARGET_FID, always report the child fid. * * Without the group flag FAN_REPORT_TARGET_FID, report the modified directory * fid on dirent events and the child fid otherwise. * * For example: * FS_ATTRIB reports the child fid even if reported on a watched parent. * FS_CREATE reports the modified dir fid without FAN_REPORT_TARGET_FID, * and reports the created child fid with FAN_REPORT_TARGET_FID. */ static struct inode *fanotify_fid_inode(u32 event_mask, const void *data, int data_type, struct inode *dir, unsigned int fid_mode) { if ((event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) && !(fid_mode & FAN_REPORT_TARGET_FID)) return dir; return fsnotify_data_inode(data, data_type); } /* * The inode to use as identifier when reporting dir fid depends on the event. * Report the modified directory inode on dirent modification events. * Report the "victim" inode if "victim" is a directory. * Report the parent inode if "victim" is not a directory and event is * reported to parent. * Otherwise, do not report dir fid.
*/ static struct inode *fanotify_dfid_inode(u32 event_mask, const void *data, int data_type, struct inode *dir) { struct inode *inode = fsnotify_data_inode(data, data_type); if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) return dir; if (inode && S_ISDIR(inode->i_mode)) return inode; return dir; } static struct fanotify_event *fanotify_alloc_path_event(const struct path *path, unsigned int *hash, gfp_t gfp) { struct fanotify_path_event *pevent; pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp); if (!pevent) return NULL; pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH; pevent->path = *path; *hash ^= fanotify_hash_path(path); path_get(path); return &pevent->fae; } static struct fanotify_event *fanotify_alloc_mnt_event(u64 mnt_id, gfp_t gfp) { struct fanotify_mnt_event *pevent; pevent = kmem_cache_alloc(fanotify_mnt_event_cachep, gfp); if (!pevent) return NULL; pevent->fae.type = FANOTIFY_EVENT_TYPE_MNT; pevent->mnt_id = mnt_id; return &pevent->fae; } static struct fanotify_event *fanotify_alloc_perm_event(const void *data, int data_type, gfp_t gfp) { const struct path *path = fsnotify_data_path(data, data_type); const struct file_range *range = fsnotify_data_file_range(data, data_type); struct fanotify_perm_event *pevent; pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp); if (!pevent) return NULL; pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH_PERM; pevent->response = 0; pevent->hdr.type = FAN_RESPONSE_INFO_NONE; pevent->hdr.pad = 0; pevent->hdr.len = 0; pevent->state = FAN_EVENT_INIT; pevent->path = *path; /* NULL ppos means no range info */ pevent->ppos = range ? &range->pos : NULL; pevent->count = range ? range->count : 0; path_get(path); return &pevent->fae; } static struct fanotify_event *fanotify_alloc_fid_event(struct inode *id, __kernel_fsid_t *fsid, unsigned int *hash, gfp_t gfp) { struct fanotify_fid_event *ffe; ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp); if (!ffe) return NULL; ffe->fae.type = FANOTIFY_EVENT_TYPE_FID; ffe->fsid = *fsid; *hash ^= fanotify_hash_fsid(fsid); fanotify_encode_fh(&ffe->object_fh, id, fanotify_encode_fh_len(id), hash, gfp); return &ffe->fae; } static struct fanotify_event *fanotify_alloc_name_event(struct inode *dir, __kernel_fsid_t *fsid, const struct qstr *name, struct inode *child, struct dentry *moved, unsigned int *hash, gfp_t gfp) { struct fanotify_name_event *fne; struct fanotify_info *info; struct fanotify_fh *dfh, *ffh; struct inode *dir2 = moved ? d_inode(moved->d_parent) : NULL; const struct qstr *name2 = moved ? &moved->d_name : NULL; unsigned int dir_fh_len = fanotify_encode_fh_len(dir); unsigned int dir2_fh_len = fanotify_encode_fh_len(dir2); unsigned int child_fh_len = fanotify_encode_fh_len(child); unsigned long name_len = name ? name->len : 0; unsigned long name2_len = name2 ? 
name2->len : 0; unsigned int len, size; /* Reserve terminating null byte even for empty name */ size = sizeof(*fne) + name_len + name2_len + 2; if (dir_fh_len) size += FANOTIFY_FH_HDR_LEN + dir_fh_len; if (dir2_fh_len) size += FANOTIFY_FH_HDR_LEN + dir2_fh_len; if (child_fh_len) size += FANOTIFY_FH_HDR_LEN + child_fh_len; fne = kmalloc(size, gfp); if (!fne) return NULL; fne->fae.type = FANOTIFY_EVENT_TYPE_FID_NAME; fne->fsid = *fsid; *hash ^= fanotify_hash_fsid(fsid); info = &fne->info; fanotify_info_init(info); if (dir_fh_len) { dfh = fanotify_info_dir_fh(info); len = fanotify_encode_fh(dfh, dir, dir_fh_len, hash, 0); fanotify_info_set_dir_fh(info, len); } if (dir2_fh_len) { dfh = fanotify_info_dir2_fh(info); len = fanotify_encode_fh(dfh, dir2, dir2_fh_len, hash, 0); fanotify_info_set_dir2_fh(info, len); } if (child_fh_len) { ffh = fanotify_info_file_fh(info); len = fanotify_encode_fh(ffh, child, child_fh_len, hash, 0); fanotify_info_set_file_fh(info, len); } if (name_len) { fanotify_info_copy_name(info, name); *hash ^= full_name_hash((void *)name_len, name->name, name_len); } if (name2_len) { fanotify_info_copy_name2(info, name2); *hash ^= full_name_hash((void *)name2_len, name2->name, name2_len); } pr_debug("%s: size=%u dir_fh_len=%u child_fh_len=%u name_len=%u name='%.*s'\n", __func__, size, dir_fh_len, child_fh_len, info->name_len, info->name_len, fanotify_info_name(info)); if (dir2_fh_len) { pr_debug("%s: dir2_fh_len=%u name2_len=%u name2='%.*s'\n", __func__, dir2_fh_len, info->name2_len, info->name2_len, fanotify_info_name2(info)); } return &fne->fae; } static struct fanotify_event *fanotify_alloc_error_event( struct fsnotify_group *group, __kernel_fsid_t *fsid, const void *data, int data_type, unsigned int *hash) { struct fs_error_report *report = fsnotify_data_error_report(data, data_type); struct inode *inode; struct fanotify_error_event *fee; int fh_len; if (WARN_ON_ONCE(!report)) return NULL; fee = mempool_alloc(&group->fanotify_data.error_events_pool, GFP_NOFS); if (!fee) return NULL; fee->fae.type = FANOTIFY_EVENT_TYPE_FS_ERROR; fee->error = report->error; fee->err_count = 1; fee->fsid = *fsid; inode = report->inode; fh_len = fanotify_encode_fh_len(inode); /* Bad fh_len. Fallback to using an invalid fh. Should never happen. */ if (!fh_len && inode) inode = NULL; fanotify_encode_fh(&fee->object_fh, inode, fh_len, NULL, 0); *hash ^= fanotify_hash_fsid(fsid); return &fee->fae; } static struct fanotify_event *fanotify_alloc_event( struct fsnotify_group *group, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, __kernel_fsid_t *fsid, u32 match_mask) { struct fanotify_event *event = NULL; gfp_t gfp = GFP_KERNEL_ACCOUNT; unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); struct inode *id = fanotify_fid_inode(mask, data, data_type, dir, fid_mode); struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir); const struct path *path = fsnotify_data_path(data, data_type); u64 mnt_id = fsnotify_data_mnt_id(data, data_type); struct mem_cgroup *old_memcg; struct dentry *moved = NULL; struct inode *child = NULL; bool name_event = false; unsigned int hash = 0; bool ondir = mask & FAN_ONDIR; struct pid *pid; if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) { /* * For certain events and group flags, report the child fid * in addition to reporting the parent fid and maybe child name. 
*/ if (fanotify_report_child_fid(fid_mode, mask) && id != dirid) child = id; id = dirid; /* * We record file name only in a group with FAN_REPORT_NAME * and when we have a directory inode to report. * * For directory entry modification event, we record the fid of * the directory and the name of the modified entry. * * For event on non-directory that is reported to parent, we * record the fid of the parent and the name of the child. * * Even if not reporting name, we need a variable length * fanotify_name_event if reporting both parent and child fids. */ if (!(fid_mode & FAN_REPORT_NAME)) { name_event = !!child; file_name = NULL; } else if ((mask & ALL_FSNOTIFY_DIRENT_EVENTS) || !ondir) { name_event = true; } /* * In the special case of FAN_RENAME event, use the match_mask * to determine if we need to report only the old parent+name, * only the new parent+name or both. * 'dirid' and 'file_name' are the old parent+name and * 'moved' has the new parent+name. */ if (mask & FAN_RENAME) { bool report_old, report_new; if (WARN_ON_ONCE(!match_mask)) return NULL; /* Report both old and new parent+name if sb watching */ report_old = report_new = match_mask & (1U << FSNOTIFY_ITER_TYPE_SB); report_old |= match_mask & (1U << FSNOTIFY_ITER_TYPE_INODE); report_new |= match_mask & (1U << FSNOTIFY_ITER_TYPE_INODE2); if (!report_old) { /* Do not report old parent+name */ dirid = NULL; file_name = NULL; } if (report_new) { /* Report new parent+name */ moved = fsnotify_data_dentry(data, data_type); } } } /* * For queues with unlimited length lost events are not expected and * can possibly have security implications. Avoid losing events when * memory is short. For the limited size queues, avoid OOM killer in the * target monitoring memcg as it may have security repercussion. */ if (group->max_events == UINT_MAX) gfp |= __GFP_NOFAIL; else gfp |= __GFP_RETRY_MAYFAIL; /* Whoever is interested in the event, pays for the allocation. */ old_memcg = set_active_memcg(group->memcg); if (fanotify_is_perm_event(mask)) { event = fanotify_alloc_perm_event(data, data_type, gfp); } else if (fanotify_is_error_event(mask)) { event = fanotify_alloc_error_event(group, fsid, data, data_type, &hash); } else if (name_event && (file_name || moved || child)) { event = fanotify_alloc_name_event(dirid, fsid, file_name, child, moved, &hash, gfp); } else if (fid_mode) { event = fanotify_alloc_fid_event(id, fsid, &hash, gfp); } else if (path) { event = fanotify_alloc_path_event(path, &hash, gfp); } else if (mnt_id) { event = fanotify_alloc_mnt_event(mnt_id, gfp); } else { WARN_ON_ONCE(1); } if (!event) goto out; if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) pid = get_pid(task_pid(current)); else pid = get_pid(task_tgid(current)); /* Mix event info, FAN_ONDIR flag and pid into event merge key */ hash ^= hash_long((unsigned long)pid | ondir, FANOTIFY_EVENT_HASH_BITS); fanotify_init_event(event, hash, mask); event->pid = pid; out: set_active_memcg(old_memcg); return event; } /* * Get cached fsid of the filesystem containing the object from any mark. * All marks are supposed to have the same fsid, but we do not verify that here. 
*/ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *mark; int type; __kernel_fsid_t fsid = {}; fsnotify_foreach_iter_mark_type(iter_info, mark, type) { if (!(mark->flags & FSNOTIFY_MARK_FLAG_HAS_FSID)) continue; fsid = FANOTIFY_MARK(mark)->fsid; if (!(mark->flags & FSNOTIFY_MARK_FLAG_WEAK_FSID) && WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1])) continue; return fsid; } return fsid; } /* * Add an event to hash table for faster merge. */ static void fanotify_insert_event(struct fsnotify_group *group, struct fsnotify_event *fsn_event) { struct fanotify_event *event = FANOTIFY_E(fsn_event); unsigned int bucket = fanotify_event_hash_bucket(group, event); struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket]; assert_spin_locked(&group->notification_lock); if (!fanotify_is_hashed_event(event->mask)) return; pr_debug("%s: group=%p event=%p bucket=%u\n", __func__, group, event, bucket); hlist_add_head(&event->merge_list, hlist); } static int fanotify_handle_event(struct fsnotify_group *group, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { int ret = 0; struct fanotify_event *event; struct fsnotify_event *fsn_event; __kernel_fsid_t fsid = {}; u32 match_mask = 0; BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS); BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY); BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB); BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE); BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE); BUILD_BUG_ON(FAN_OPEN != FS_OPEN); BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO); BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM); BUILD_BUG_ON(FAN_CREATE != FS_CREATE); BUILD_BUG_ON(FAN_DELETE != FS_DELETE); BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF); BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF); BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD); BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW); BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM); BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM); BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR); BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC); BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM); BUILD_BUG_ON(FAN_FS_ERROR != FS_ERROR); BUILD_BUG_ON(FAN_RENAME != FS_RENAME); BUILD_BUG_ON(FAN_PRE_ACCESS != FS_PRE_ACCESS); BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 24); mask = fanotify_group_event_mask(group, iter_info, &match_mask, mask, data, data_type, dir); if (!mask) return 0; pr_debug("%s: group=%p mask=%x report_mask=%x\n", __func__, group, mask, match_mask); if (fanotify_is_perm_event(mask)) { /* * fsnotify_prepare_user_wait() fails if we race with mark * deletion. Just let the operation pass in that case. */ if (!fsnotify_prepare_user_wait(iter_info)) return 0; } if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) fsid = fanotify_get_fsid(iter_info); event = fanotify_alloc_event(group, mask, data, data_type, dir, file_name, &fsid, match_mask); ret = -ENOMEM; if (unlikely(!event)) { /* * We don't queue overflow events for permission events as * there the access is denied and so no event is in fact lost. */ if (!fanotify_is_perm_event(mask)) fsnotify_queue_overflow(group); goto finish; } fsn_event = &event->fse; ret = fsnotify_insert_event(group, fsn_event, fanotify_merge, fanotify_insert_event); if (ret) { /* Permission events shouldn't be merged */ BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS); /* Our event wasn't used in the end. Free it. 
*/ fsnotify_destroy_event(group, fsn_event); ret = 0; } else if (fanotify_is_perm_event(mask)) { ret = fanotify_get_response(group, FANOTIFY_PERM(event), iter_info); } finish: if (fanotify_is_perm_event(mask)) fsnotify_finish_user_wait(iter_info); return ret; } static void fanotify_free_group_priv(struct fsnotify_group *group) { put_user_ns(group->user_ns); kfree(group->fanotify_data.merge_hash); if (group->fanotify_data.ucounts) dec_ucount(group->fanotify_data.ucounts, UCOUNT_FANOTIFY_GROUPS); if (mempool_initialized(&group->fanotify_data.error_events_pool)) mempool_exit(&group->fanotify_data.error_events_pool); } static void fanotify_free_path_event(struct fanotify_event *event) { path_put(fanotify_event_path(event)); kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event)); } static void fanotify_free_perm_event(struct fanotify_event *event) { path_put(fanotify_event_path(event)); kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event)); } static void fanotify_free_fid_event(struct fanotify_event *event) { struct fanotify_fid_event *ffe = FANOTIFY_FE(event); if (fanotify_fh_has_ext_buf(&ffe->object_fh)) kfree(fanotify_fh_ext_buf(&ffe->object_fh)); kmem_cache_free(fanotify_fid_event_cachep, ffe); } static void fanotify_free_name_event(struct fanotify_event *event) { kfree(FANOTIFY_NE(event)); } static void fanotify_free_error_event(struct fsnotify_group *group, struct fanotify_event *event) { struct fanotify_error_event *fee = FANOTIFY_EE(event); mempool_free(fee, &group->fanotify_data.error_events_pool); } static void fanotify_free_mnt_event(struct fanotify_event *event) { kmem_cache_free(fanotify_mnt_event_cachep, FANOTIFY_ME(event)); } static void fanotify_free_event(struct fsnotify_group *group, struct fsnotify_event *fsn_event) { struct fanotify_event *event; event = FANOTIFY_E(fsn_event); put_pid(event->pid); switch (event->type) { case FANOTIFY_EVENT_TYPE_PATH: fanotify_free_path_event(event); break; case FANOTIFY_EVENT_TYPE_PATH_PERM: fanotify_free_perm_event(event); break; case FANOTIFY_EVENT_TYPE_FID: fanotify_free_fid_event(event); break; case FANOTIFY_EVENT_TYPE_FID_NAME: fanotify_free_name_event(event); break; case FANOTIFY_EVENT_TYPE_OVERFLOW: kfree(event); break; case FANOTIFY_EVENT_TYPE_FS_ERROR: fanotify_free_error_event(group, event); break; case FANOTIFY_EVENT_TYPE_MNT: fanotify_free_mnt_event(event); break; default: WARN_ON_ONCE(1); } } static void fanotify_freeing_mark(struct fsnotify_mark *mark, struct fsnotify_group *group) { if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS)) dec_ucount(group->fanotify_data.ucounts, UCOUNT_FANOTIFY_MARKS); } static void fanotify_free_mark(struct fsnotify_mark *fsn_mark) { kmem_cache_free(fanotify_mark_cache, FANOTIFY_MARK(fsn_mark)); } const struct fsnotify_ops fanotify_fsnotify_ops = { .handle_event = fanotify_handle_event, .free_group_priv = fanotify_free_group_priv, .free_event = fanotify_free_event, .freeing_mark = fanotify_freeing_mark, .free_mark = fanotify_free_mark, };
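For context, the allocation, merging, and permission-response machinery above services the userspace fanotify(7) API. A minimal userspace listener might look like the following sketch (the watched path, flags, and buffer size are illustrative; error handling is abbreviated, and marking a mount requires CAP_SYS_ADMIN):

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>
#include <unistd.h>

int main(void)
{
	struct fanotify_event_metadata buf[16], *m;
	ssize_t len;
	int fd;

	fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
	if (fd < 0)
		return 1;

	/* Watch open and close-for-write events on the mount containing /tmp */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
			  FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/tmp") < 0)
		return 1;

	len = read(fd, buf, sizeof(buf));
	for (m = buf; FAN_EVENT_OK(m, len); m = FAN_EVENT_NEXT(m, len)) {
		printf("mask=0x%llx pid=%d\n",
		       (unsigned long long)m->mask, (int)m->pid);
		if (m->fd >= 0)
			close(m->fd);	/* each path event carries an open fd */
	}
	close(fd);
	return 0;
}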
// SPDX-License-Identifier: GPL-2.0-only /* * fs/kernfs/symlink.c - kernfs symlink implementation * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org> */ #include <linux/fs.h> #include <linux/gfp.h> #include <linux/namei.h> #include "kernfs-internal.h" /** * kernfs_create_link - create a symlink * @parent: directory to create the symlink in * @name: name of the symlink * @target: target node for the symlink to point to * * Return: the created node on success, ERR_PTR() value on error. * Ownership of the link matches ownership of the target. */ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, const char *name, struct kernfs_node *target) { struct kernfs_node *kn; int error; kuid_t uid = GLOBAL_ROOT_UID; kgid_t gid = GLOBAL_ROOT_GID; if (target->iattr) { uid = target->iattr->ia_uid; gid = target->iattr->ia_gid; } kn = kernfs_new_node(parent, name, S_IFLNK|0777, uid, gid, KERNFS_LINK); if (!kn) return ERR_PTR(-ENOMEM); if (kernfs_ns_enabled(parent)) kn->ns = target->ns; kn->symlink.target_kn = target; kernfs_get(target); /* ref owned by symlink */ error = kernfs_add_one(kn); if (!error) return kn; kernfs_put(kn); return ERR_PTR(error); } static int kernfs_get_target_path(struct kernfs_node *parent, struct kernfs_node *target, char *path) { struct kernfs_node *base, *kn; char *s = path; int len = 0; /* go up to the root, stop at the base */ base = parent; while (kernfs_parent(base)) { kn = kernfs_parent(target); while (kernfs_parent(kn) && base != kn) kn = kernfs_parent(kn); if (base == kn) break; if ((s - path) + 3 >= PATH_MAX) return -ENAMETOOLONG; strcpy(s, "../"); s += 3; base = kernfs_parent(base); } /* determine end of target string for reverse fillup */ kn = target; while (kernfs_parent(kn) && kn != base) { len += strlen(kernfs_rcu_name(kn)) + 1; kn = kernfs_parent(kn); } /* check limits */ if (len < 2) return -EINVAL; len--; if ((s - path) + len >= PATH_MAX) return -ENAMETOOLONG; /* reverse fillup of target string from target to base */ kn = target; while (kernfs_parent(kn) && kn != base) { const char *name = kernfs_rcu_name(kn); int slen = strlen(name); len -= slen; memcpy(s + len, name, slen); if (len) s[--len] = '/'; kn = kernfs_parent(kn); } return 0; } static int kernfs_getlink(struct inode *inode, char *path) { struct kernfs_node *kn = inode->i_private; struct kernfs_node *parent; struct kernfs_node *target = kn->symlink.target_kn; struct kernfs_root *root = kernfs_root(kn); int error; down_read(&root->kernfs_rwsem); parent = kernfs_parent(kn); error = kernfs_get_target_path(parent, target, path); up_read(&root->kernfs_rwsem); return error; } static const char *kernfs_iop_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { char *body; int error; if (!dentry) return ERR_PTR(-ECHILD); body = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!body) return ERR_PTR(-ENOMEM); error = kernfs_getlink(inode, body); if
(unlikely(error < 0)) { kfree(body); return ERR_PTR(error); } set_delayed_call(done, kfree_link, body); return body; } const struct inode_operations kernfs_symlink_iops = { .listxattr = kernfs_iop_listxattr, .get_link = kernfs_iop_get_link, .setattr = kernfs_iop_setattr, .getattr = kernfs_iop_getattr, .permission = kernfs_iop_permission, };
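As a usage note for kernfs_create_link() above, a caller-side sketch follows (the parent and target nodes and the link name are hypothetical; the error handling mirrors the ERR_PTR() convention documented in the function header):

#include <linux/err.h>
#include <linux/kernfs.h>

static int example_add_alias(struct kernfs_node *parent,
			     struct kernfs_node *target)
{
	struct kernfs_node *kn;

	kn = kernfs_create_link(parent, "alias", target);
	if (IS_ERR(kn))
		return PTR_ERR(kn);	/* e.g. -ENOMEM from node allocation */

	/* The link holds its own reference on @target until it is removed. */
	return 0;
}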
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_CPUFEATURE_H #define _ASM_X86_CPUFEATURE_H #include <asm/processor.h> #if defined(__KERNEL__) && !defined(__ASSEMBLER__) #include <asm/asm.h> #include <linux/bitops.h> #include <asm/alternative.h> #include <asm/cpufeaturemasks.h> enum cpuid_leafs { CPUID_1_EDX = 0, CPUID_8000_0001_EDX, CPUID_8086_0001_EDX, CPUID_LNX_1, CPUID_1_ECX, CPUID_C000_0001_EDX, CPUID_8000_0001_ECX, CPUID_LNX_2, CPUID_LNX_3, CPUID_7_0_EBX, CPUID_D_1_EAX, CPUID_LNX_4, CPUID_7_1_EAX, CPUID_8000_0008_EBX, CPUID_6_EAX, CPUID_8000_000A_EDX, CPUID_7_ECX, CPUID_8000_0007_EBX, CPUID_7_EDX, CPUID_8000_001F_EAX, CPUID_8000_0021_EAX, CPUID_LNX_5, NR_CPUID_WORDS, }; extern const char * const x86_cap_flags[NCAPINTS*32]; extern const char * const x86_power_flags[32]; /* * In order to save room, we index into this array by doing * X86_BUG_<name> - NCAPINTS*32. */ extern const char * const x86_bug_flags[NBUGINTS*32]; #define x86_bug_flag(flag) x86_bug_flags[flag] #define test_cpu_cap(c, bit) \ arch_test_bit(bit, (unsigned long *)((c)->x86_capability)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ test_cpu_cap(c, bit)) #define this_cpu_has(bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ x86_this_cpu_test_bit(bit, cpu_info.x86_capability)) /* * This is the default CPU features testing macro to use in code. * * It is for detection of features which need kernel infrastructure to be * used. It may *not* directly test the CPU itself. Use the cpu_has() family * if you want true runtime testing of CPU features, like in hypervisor code * where you are supporting a possible guest feature where host support for it * is not relevant. */ #define cpu_feature_enabled(bit) \ (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit)) #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) extern void setup_clear_cpu_cap(unsigned int bit); extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); void check_cpufeature_deps(struct cpuinfo_x86 *c); #define setup_force_cpu_cap(bit) do { \ \ if (!boot_cpu_has(bit)) \ WARN_ON(alternatives_patched); \ \ set_cpu_cap(&boot_cpu_data, bit); \ set_bit(bit, (unsigned long *)cpu_caps_set); \ } while (0) #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) /* * Do not use an "m" constraint for [cap_byte] here: gcc doesn't know * that this is only used on a fallback path and will sometimes cause * it to manifest the address of boot_cpu_data in a register, fouling * the mainline (post-initialization) code.
*/ static __always_inline bool _static_cpu_has(u16 bit) { asm goto(ALTERNATIVE_TERNARY("jmp 6f", %c[feature], "", "jmp %l[t_no]") ".pushsection .altinstr_aux,\"ax\"\n" "6:\n" " testb %[bitnum], %a[cap_byte]\n" " jnz %l[t_yes]\n" " jmp %l[t_no]\n" ".popsection\n" : : [feature] "i" (bit), [bitnum] "i" (1 << (bit & 7)), [cap_byte] "i" (&((const char *)boot_cpu_data.x86_capability)[bit >> 3]) : : t_yes, t_no); t_yes: return true; t_no: return false; } #define static_cpu_has(bit) \ ( \ __builtin_constant_p(boot_cpu_has(bit)) ? \ boot_cpu_has(bit) : \ _static_cpu_has(bit) \ ) #define cpu_has_bug(c, bit) cpu_has(c, (bit)) #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)) #define static_cpu_has_bug(bit) static_cpu_has((bit)) #define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) #define boot_cpu_set_bug(bit) set_cpu_cap(&boot_cpu_data, (bit)) #define MAX_CPU_FEATURES (NCAPINTS * 32) #define cpu_have_feature boot_cpu_has #define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X" #define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ boot_cpu_data.x86_model #endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) */ #endif /* _ASM_X86_CPUFEATURE_H */
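A short illustration of the macros above (a sketch; the feature and bug bits chosen are arbitrary examples): cpu_feature_enabled() folds to a compile-time 0 for build-time-disabled features and is otherwise patched via alternatives, while boot_cpu_has()/boot_cpu_has_bug() always consult the capability bitmap:

#include <asm/cpufeature.h>
#include <linux/printk.h>

static void example_report_features(void)
{
	/* Folds to 0 at build time if X86_FEATURE_XSAVE is mask-disabled */
	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
		pr_info("XSAVE is usable\n");

	/* Bug bits use the same machinery, offset past the capability words */
	if (boot_cpu_has_bug(X86_BUG_L1TF))
		pr_warn("affected by L1TF\n");
}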
/* SPDX-License-Identifier: GPL-2.0 */ /* * Common values for AES algorithms */ #ifndef _CRYPTO_AES_H #define _CRYPTO_AES_H #include <linux/types.h> #include <linux/crypto.h> #define AES_MIN_KEY_SIZE 16 #define AES_MAX_KEY_SIZE 32 #define AES_KEYSIZE_128 16 #define AES_KEYSIZE_192 24 #define AES_KEYSIZE_256 32 #define AES_BLOCK_SIZE 16 #define AES_MAX_KEYLENGTH (15 * 16) #define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32)) /* * Please ensure that the first two fields are 16-byte aligned * relative to the start of the structure, i.e., don't move them! */ struct crypto_aes_ctx { u32 key_enc[AES_MAX_KEYLENGTH_U32]; u32 key_dec[AES_MAX_KEYLENGTH_U32]; u32 key_length; }; extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned; extern const u32 crypto_it_tab[4][256] ____cacheline_aligned; /* * validate key length for AES algorithms */ static inline int aes_check_keylen(unsigned int keylen) { switch (keylen) { case AES_KEYSIZE_128: case AES_KEYSIZE_192: case AES_KEYSIZE_256: break; default: return -EINVAL; } return 0; } int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len); /** * aes_expandkey - Expands the AES key as described in FIPS-197 * @ctx: The location where the computed key will be stored. * @in_key: The supplied key. * @key_len: The length of the supplied key. * * Returns 0 on success. The function fails only if an invalid key size (or * pointer) is supplied. * The expanded key size is 240 bytes (max of 14 rounds with a unique 16-byte * key schedule plus a 16-byte key which is used before the first round). * The decryption key is prepared for the "Equivalent Inverse Cipher" as * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is * for the initial combination, the second slot for the first round and so on. */ int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len); /** * aes_encrypt - Encrypt a single AES block * @ctx: Context struct containing the key schedule * @out: Buffer to store the ciphertext * @in: Buffer containing the plaintext */ void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); /** * aes_decrypt - Decrypt a single AES block * @ctx: Context struct containing the key schedule * @out: Buffer to store the plaintext * @in: Buffer containing the ciphertext */ void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); extern const u8 crypto_aes_sbox[]; extern const u8 crypto_aes_inv_sbox[]; void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, int len, const u8 iv[AES_BLOCK_SIZE]); void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, int len, const u8 iv[AES_BLOCK_SIZE]); #endif
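To show how the declarations above fit together, here is a minimal sketch of expanding a 128-bit key and encrypting one block with the library cipher (the key and plaintext bytes are arbitrary example values, and the caller is assumed to have the library cipher available, e.g. via CONFIG_CRYPTO_LIB_AES):

#include <crypto/aes.h>
#include <linux/string.h>

static int example_aes_single_block(void)
{
	static const u8 key[AES_KEYSIZE_128] = { 0x2b, 0x7e, 0x15, 0x16 };
	u8 block[AES_BLOCK_SIZE] = "fifteen bytes..";	/* 15 chars + NUL */
	struct crypto_aes_ctx ctx;
	int err;

	err = aes_expandkey(&ctx, key, sizeof(key));	/* 0 or -EINVAL */
	if (err)
		return err;

	aes_encrypt(&ctx, block, block);	/* single block, in place */
	memzero_explicit(&ctx, sizeof(ctx));	/* scrub the key schedule */
	return 0;
}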
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KCOV_H #define _LINUX_KCOV_H #include <linux/sched.h> #include <uapi/linux/kcov.h> struct task_struct; #ifdef CONFIG_KCOV enum kcov_mode { /* Coverage collection is not enabled yet. */ KCOV_MODE_DISABLED = 0, /* KCOV was initialized, but tracing mode hasn't been chosen yet. */ KCOV_MODE_INIT = 1, /* * Tracing coverage collection mode. * Covered PCs are collected in a per-task buffer. */ KCOV_MODE_TRACE_PC = 2, /* Collecting comparison operands mode. */ KCOV_MODE_TRACE_CMP = 3, /* The process owns a KCOV remote reference. */ KCOV_MODE_REMOTE = 4, }; #define KCOV_IN_CTXSW (1 << 30) void kcov_task_init(struct task_struct *t); void kcov_task_exit(struct task_struct *t); #define kcov_prepare_switch(t) \ do { \ (t)->kcov_mode |= KCOV_IN_CTXSW; \ } while (0) #define kcov_finish_switch(t) \ do { \ (t)->kcov_mode &= ~KCOV_IN_CTXSW; \ } while (0) /* See Documentation/dev-tools/kcov.rst for usage details. */ void kcov_remote_start(u64 handle); void kcov_remote_stop(void); u64 kcov_common_handle(void); static inline void kcov_remote_start_common(u64 id) { kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, id)); } static inline void kcov_remote_start_usb(u64 id) { kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id)); } /* * The softirq flavor of kcov_remote_*() functions is introduced as a temporary * workaround for kcov's lack of nested remote coverage sections support in * task context. Adding support for nested sections is tracked in: * https://bugzilla.kernel.org/show_bug.cgi?id=210337 */ static inline void kcov_remote_start_usb_softirq(u64 id) { if (in_serving_softirq() && !in_hardirq()) kcov_remote_start_usb(id); } static inline void kcov_remote_stop_softirq(void) { if (in_serving_softirq() && !in_hardirq()) kcov_remote_stop(); } #ifdef CONFIG_64BIT typedef unsigned long kcov_u64; #else typedef unsigned long long kcov_u64; #endif void __sanitizer_cov_trace_pc(void); void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2); void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2); void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2); void __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2); void __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2); void __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2); void __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2); void __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2); void __sanitizer_cov_trace_switch(kcov_u64 val, void *cases); #else static inline void kcov_task_init(struct task_struct *t) {} static inline void kcov_task_exit(struct task_struct *t) {} static inline void kcov_prepare_switch(struct task_struct *t) {} static inline void kcov_finish_switch(struct task_struct *t) {} static inline void kcov_remote_start(u64 handle) {} static inline void kcov_remote_stop(void) {} static inline u64 kcov_common_handle(void) { return 0; } static inline void kcov_remote_start_common(u64 id) {} static inline void kcov_remote_start_usb(u64 id) {} static inline void kcov_remote_start_usb_softirq(u64 id) {} static inline void kcov_remote_stop_softirq(void) {} #endif /* CONFIG_KCOV */ #endif /* _LINUX_KCOV_H */
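As a usage sketch for the remote coverage interface above (the handle id 0x42 and the worker context are illustrative assumptions; Documentation/dev-tools/kcov.rst describes the full userspace protocol), a background worker brackets the code whose coverage should be attributed to a userspace-supplied handle:

#include <linux/kcov.h>

static void example_annotated_work(void)
{
	/* Expands to kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42)) */
	kcov_remote_start_common(0x42);

	/* ... work whose covered PCs land in the remote coverage buffer ... */

	kcov_remote_stop();
}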
// SPDX-License-Identifier: GPL-2.0-only
/*
 * lib/parser.c - simple parser for mount, etc. options.
 */

#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/kstrtox.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * max size needed by different bases to express U64
 * HEX: "0xFFFFFFFFFFFFFFFF" --> 18
 * DEC: "18446744073709551615" --> 20
 * OCT: "01777777777777777777777" --> 23
 * pick the max one to define NUMBER_BUF_LEN
 */
#define NUMBER_BUF_LEN 24

/**
 * match_one - Determines if a string matches a simple pattern
 * @s: the string to examine for presence of the pattern
 * @p: the string containing the pattern
 * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
 * locations.
 *
 * Description: Determines if the pattern @p is present in string @s. Can
 * only match extremely simple token=arg style patterns. If the pattern is
 * found, the location(s) of the arguments will be returned in the @args
 * array.
 */
static int match_one(char *s, const char *p, substring_t args[])
{
	char *meta;
	int argc = 0;

	if (!p)
		return 1;

	while (1) {
		int len = -1;

		meta = strchr(p, '%');
		if (!meta)
			return strcmp(p, s) == 0;

		if (strncmp(p, s, meta - p))
			return 0;

		s += meta - p;
		p = meta + 1;

		if (isdigit(*p))
			len = simple_strtoul(p, (char **) &p, 10);
		else if (*p == '%') {
			if (*s++ != '%')
				return 0;
			p++;
			continue;
		}

		if (argc >= MAX_OPT_ARGS)
			return 0;

		args[argc].from = s;
		switch (*p++) {
		case 's': {
			size_t str_len = strlen(s);

			if (str_len == 0)
				return 0;
			if (len == -1 || len > str_len)
				len = str_len;
			args[argc].to = s + len;
			break;
		}
		case 'd':
			simple_strtol(s, &args[argc].to, 0);
			goto num;
		case 'u':
			simple_strtoul(s, &args[argc].to, 0);
			goto num;
		case 'o':
			simple_strtoul(s, &args[argc].to, 8);
			goto num;
		case 'x':
			simple_strtoul(s, &args[argc].to, 16);
		num:
			if (args[argc].to == args[argc].from)
				return 0;
			break;
		default:
			return 0;
		}
		s = args[argc].to;
		argc++;
	}
}

/**
 * match_token - Find a token (and optional args) in a string
 * @s: the string to examine for token/argument pairs
 * @table: match_table_t describing the set of allowed option tokens and the
 * arguments that may be associated with them. Must be terminated with a
 * &struct match_token whose pattern is set to the NULL pointer.
 * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
 * locations.
 *
 * Description: Detects which if any of a set of token strings has been passed
 * to it. Tokens can include up to %MAX_OPT_ARGS instances of basic c-style
 * format identifiers which will be taken into account when matching the
 * tokens, and whose locations will be returned in the @args array.
 */
int match_token(char *s, const match_table_t table, substring_t args[])
{
	const struct match_token *p;

	for (p = table; !match_one(s, p->pattern, args) ; p++)
		;

	return p->token;
}
EXPORT_SYMBOL(match_token);

/**
 * match_number - scan a number in the given base from a substring_t
 * @s: substring to be scanned
 * @result: resulting integer on success
 * @base: base to use when converting string
 *
 * Description: Given a &substring_t and a base, attempts to parse the
 * substring as a number in that base.
 *
 * Return: On success, sets @result to the integer represented by the
 * string and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
static int match_number(substring_t *s, int *result, int base)
{
	char *endp;
	char buf[NUMBER_BUF_LEN];
	int ret;
	long val;

	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
		return -ERANGE;
	ret = 0;
	val = simple_strtol(buf, &endp, base);
	if (endp == buf)
		ret = -EINVAL;
	else if (val < (long)INT_MIN || val > (long)INT_MAX)
		ret = -ERANGE;
	else
		*result = (int) val;
	return ret;
}

/**
 * match_u64int - scan a number in the given base from a substring_t
 * @s: substring to be scanned
 * @result: resulting u64 on success
 * @base: base to use when converting string
 *
 * Description: Given a &substring_t and a base, attempts to parse the
 * substring as a number in that base.
 *
 * Return: On success, sets @result to the integer represented by the
 * string and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
static int match_u64int(substring_t *s, u64 *result, int base)
{
	char buf[NUMBER_BUF_LEN];
	int ret;
	u64 val;

	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
		return -ERANGE;
	ret = kstrtoull(buf, base, &val);
	if (!ret)
		*result = val;
	return ret;
}

/**
 * match_int - scan a decimal representation of an integer from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting integer on success
 *
 * Description: Attempts to parse the &substring_t @s as a decimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_int(substring_t *s, int *result)
{
	return match_number(s, result, 0);
}
EXPORT_SYMBOL(match_int);

/**
 * match_uint - scan a decimal representation of an integer from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting integer on success
 *
 * Description: Attempts to parse the &substring_t @s as a decimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_uint(substring_t *s, unsigned int *result)
{
	char buf[NUMBER_BUF_LEN];

	if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
		return -ERANGE;

	return kstrtouint(buf, 10, result);
}
EXPORT_SYMBOL(match_uint);

/**
 * match_u64 - scan a decimal representation of a u64 from
 * a substring_t
 * @s: substring_t to be scanned
 * @result: resulting unsigned long long on success
 *
 * Description: Attempts to parse the &substring_t @s as a long decimal
 * integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_u64(substring_t *s, u64 *result)
{
	return match_u64int(s, result, 0);
}
EXPORT_SYMBOL(match_u64);

/**
 * match_octal - scan an octal representation of an integer from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting integer on success
 *
 * Description: Attempts to parse the &substring_t @s as an octal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_octal(substring_t *s, int *result)
{
	return match_number(s, result, 8);
}
EXPORT_SYMBOL(match_octal);

/**
 * match_hex - scan a hex representation of an integer from a substring_t
 * @s: substring_t to be scanned
 * @result: resulting integer on success
 *
 * Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
 * and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
int match_hex(substring_t *s, int *result)
{
	return match_number(s, result, 16);
}
EXPORT_SYMBOL(match_hex);

/**
 * match_wildcard - parse if a string matches given wildcard pattern
 * @pattern: wildcard pattern
 * @str: the string to be parsed
 *
 * Description: Parse the string @str to check if it matches the wildcard
 * pattern @pattern. The pattern may contain two types of wildcards:
 *
 * * '*' - matches zero or more characters
 * * '?' - matches one character
 *
 * Return: If the @str matches the @pattern, return true, else return false.
 */
bool match_wildcard(const char *pattern, const char *str)
{
	const char *s = str;
	const char *p = pattern;
	bool star = false;

	while (*s) {
		switch (*p) {
		case '?':
			s++;
			p++;
			break;
		case '*':
			star = true;
			str = s;
			if (!*++p)
				return true;
			pattern = p;
			break;
		default:
			if (*s == *p) {
				s++;
				p++;
			} else {
				if (!star)
					return false;
				str++;
				s = str;
				p = pattern;
			}
			break;
		}
	}

	if (*p == '*')
		++p;
	return !*p;
}
EXPORT_SYMBOL(match_wildcard);

/**
 * match_strlcpy - Copy the characters from a substring_t to a sized buffer
 * @dest: where to copy to
 * @src: &substring_t to copy
 * @size: size of destination buffer
 *
 * Description: Copy the characters in &substring_t @src to the
 * c-style string @dest. Copy no more than @size - 1 characters, plus
 * the terminating NUL.
 *
 * Return: length of @src.
 */
size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
{
	size_t ret = src->to - src->from;

	if (size) {
		size_t len = ret >= size ? size - 1 : ret;

		memcpy(dest, src->from, len);
		dest[len] = '\0';
	}
	return ret;
}
EXPORT_SYMBOL(match_strlcpy);

/**
 * match_strdup - allocate a new string with the contents of a substring_t
 * @s: &substring_t to copy
 *
 * Description: Allocates and returns a string filled with the contents of
 * the &substring_t @s. The caller is responsible for freeing the returned
 * string with kfree().
 *
 * Return: the address of the newly allocated NUL-terminated string or
 * %NULL on error.
 */
char *match_strdup(const substring_t *s)
{
	return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
}
EXPORT_SYMBOL(match_strdup);
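/*
 * Editor's sketch (not part of the original file): the typical calling
 * idiom for match_token() and the match_* helpers above.  The option
 * table and the Opt_* values are hypothetical.
 */
enum { Opt_mode, Opt_ro, Opt_err };

static const match_table_t sketch_tokens = {
	{ Opt_mode, "mode=%o" },
	{ Opt_ro, "ro" },
	{ Opt_err, NULL }
};

static int __maybe_unused sketch_parse_one(char *opt, umode_t *mode)
{
	substring_t args[MAX_OPT_ARGS];
	int option;

	switch (match_token(opt, sketch_tokens, args)) {
	case Opt_mode:
		/* args[0] points at the "%o" match; convert it */
		if (match_octal(&args[0], &option))
			return -EINVAL;
		*mode = option & 0777;
		return 0;
	case Opt_ro:
		*mode = 0444;
		return 0;
	default:			/* Opt_err: unrecognized option */
		return -EINVAL;
	}
}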
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Topro TP6800/6810 webcam driver.
 *
 * Copyright (C) 2011 Jean-François Moine (http://moinejf.free.fr)
 * Copyright (C) 2009 Anders Blomdell (anders.blomdell@control.lth.se)
 * Copyright (C) 2008 Thomas Champagne (lafeuil@gmail.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "gspca.h"

MODULE_DESCRIPTION("Topro TP6800/6810 gspca webcam driver");
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Anders Blomdell <anders.blomdell@control.lth.se>");
MODULE_LICENSE("GPL");

static int force_sensor = -1;

/* JPEG header */
static const u8 jpeg_head[] = {
	0xff, 0xd8,			/* jpeg */

/* quantization table quality 50% */
	0xff, 0xdb, 0x00, 0x84,		/* DQT */
0,
#define JPEG_QT0_OFFSET 7
	0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e,
	0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28,
	0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25,
	0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33,
	0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44,
	0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57,
	0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71,
	0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63,
1,
#define JPEG_QT1_OFFSET 72
	0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
	0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,

/* Define Huffman table (thanks to Thomas Kaiser) */
	0xff, 0xc4, 0x01, 0x5e,
	0x00, 0x00, 0x02, 0x03, 0x01, 0x01, 0x01, 0x01,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
	0x07, 0x08, 0x09, 0x10, 0x00, 0x02, 0x01, 0x02,
	0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x06,
	0x01, 0x00, 0x00, 0x57, 0x01, 0x02, 0x03, 0x00,
	0x11, 0x04, 0x12, 0x21, 0x31, 0x13, 0x41, 0x51,
	0x61, 0x05, 0x22, 0x32, 0x14, 0x71, 0x81, 0x91,
	0x15, 0x23, 0x42, 0x52, 0x62, 0xa1, 0xb1, 0x06,
	0x33, 0x72, 0xc1, 0xd1, 0x24, 0x43, 0x53, 0x82,
	0x16, 0x34, 0x92, 0xa2, 0xe1, 0xf1, 0xf0, 0x07,
	0x08, 0x17, 0x18, 0x25, 0x26, 0x27, 0x28, 0x35,
	0x36, 0x37, 0x38, 0x44, 0x45, 0x46, 0x47, 0x48,
	0x54, 0x55, 0x56, 0x57, 0x58, 0x63, 0x64, 0x65,
	0x66, 0x67, 0x68, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x93,
	0x94, 0x95, 0x96, 0x97, 0x98, 0xa3, 0xa4, 0xa5,
	0xa6, 0xa7, 0xa8, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
	0xb7, 0xb8, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
	0xc8, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
	0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xf2,
	0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0x01, 0x01,
	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
	0x09, 0x11, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04,
	0x03, 0x04, 0x07, 0x05, 0x04, 0x06, 0x01, 0x00,
	0x00, 0x57, 0x00, 0x01, 0x11, 0x02, 0x21, 0x03,
	0x12, 0x31, 0x41, 0x13, 0x22, 0x51, 0x61, 0x04,
	0x32, 0x71, 0x05, 0x14, 0x23, 0x42, 0x33, 0x52,
	0x81, 0x91, 0xa1, 0xb1, 0xf0, 0x06, 0x15, 0xc1,
	0xd1, 0xe1, 0x24, 0x43, 0x62, 0xf1, 0x16, 0x25,
	0x34, 0x53, 0x72, 0x82, 0x92, 0x07, 0x08, 0x17,
	0x18, 0x26, 0x27, 0x28, 0x35, 0x36, 0x37, 0x38,
	0x44, 0x45, 0x46, 0x47, 0x48, 0x54, 0x55, 0x56,
	0x57, 0x58, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
	0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x83, 0x84,
	0x85, 0x86, 0x87, 0x88, 0x93, 0x94, 0x95, 0x96,
	0x97, 0x98, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
	0xa8, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8,
	0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xd2,
	0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xe2, 0xe3,
	0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xf2, 0xf3, 0xf4,
	0xf5, 0xf6, 0xf7, 0xf8,

	0xff, 0xc0, 0x00, 0x11,		/* SOF0 (start of frame 0) */
	0x08,				/* data precision */
#define JPEG_HEIGHT_OFFSET 493
	0x01, 0xe0,			/* height */
	0x02, 0x80,			/* width */
	0x03,				/* component number */
	0x01, 0x21,			/* samples Y = jpeg 422 */
	0x00,				/* quant Y */
	0x02, 0x11, 0x01,		/* samples CbCr - quant CbCr */
	0x03, 0x11, 0x01,

	0xff, 0xda, 0x00, 0x0c,		/* SOS (start of scan) */
	0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00
#define JPEG_HDR_SZ 521
};

struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */

	struct v4l2_ctrl *jpegqual;
	struct v4l2_ctrl *sharpness;
	struct v4l2_ctrl *gamma;
	struct v4l2_ctrl *blue;
	struct v4l2_ctrl *red;

	u8 framerate;
	u8 quality;			/* webcam current JPEG quality (0..16) */
	s8 ag_cnt;			/* autogain / start counter for tp6810 */
#define AG_CNT_START 13			/* check gain every N frames */

	u8 bridge;
	u8 sensor;

	u8 jpeg_hdr[JPEG_HDR_SZ];
};

enum bridges {
	BRIDGE_TP6800,
	BRIDGE_TP6810,
};

enum sensors {
	SENSOR_CX0342,
	SENSOR_SOI763A,			/* ~= ov7630 / ov7648 */
	NSENSORS
};

static const struct v4l2_pix_format vga_mode[] = {
	{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240 * 4 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG},
	{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480 * 3 / 8 + 590,
		.colorspace = V4L2_COLORSPACE_JPEG}
};

/*
 * JPEG quality
 * index: webcam compression
 * value: JPEG quality in %
 */
static const u8 jpeg_q[17] = {
	88, 77, 67, 57, 55, 55, 45, 45, 36, 36, 30, 30, 26, 26, 22, 22, 94
};

#define BULK_OUT_SIZE 0x20
#if BULK_OUT_SIZE > USB_BUF_SZ
#error "USB buffer too small"
#endif

#define DEFAULT_FRAME_RATE 30

static const u8 rates[] = {30, 20, 15, 10, 7, 5};
static const struct framerates framerates[] = {
	{
		.rates = rates,
		.nrates = ARRAY_SIZE(rates)
	},
	{
		.rates = rates,
		.nrates = ARRAY_SIZE(rates)
	}
};
static const u8 rates_6810[] = {30, 15, 10, 7, 5};
static const struct framerates framerates_6810[] = {
	{
		.rates = rates_6810,
		.nrates = ARRAY_SIZE(rates_6810)
	},
	{
		.rates = rates_6810,
		.nrates = ARRAY_SIZE(rates_6810)
	}
};

/*
 * webcam quality in %
 * the last value is the ultra fine quality
 */

/* TP6800 register offsets */
#define TP6800_R10_SIF_TYPE		0x10
#define TP6800_R11_SIF_CONTROL		0x11
#define TP6800_R12_SIF_ADDR_S		0x12
#define TP6800_R13_SIF_TX_DATA		0x13
#define TP6800_R14_SIF_RX_DATA		0x14
#define TP6800_R15_GPIO_PU		0x15
#define TP6800_R16_GPIO_PD		0x16
#define TP6800_R17_GPIO_IO		0x17
#define TP6800_R18_GPIO_DATA		0x18
#define TP6800_R19_SIF_ADDR_S2		0x19
#define TP6800_R1A_SIF_TX_DATA2		0x1a
#define TP6800_R1B_SIF_RX_DATA2		0x1b
#define TP6800_R21_ENDP_1_CTL		0x21
#define TP6800_R2F_TIMING_CFG		0x2f
#define TP6800_R30_SENSOR_CFG		0x30
#define TP6800_R31_PIXEL_START		0x31
#define TP6800_R32_PIXEL_END_L		0x32
#define TP6800_R33_PIXEL_END_H		0x33
#define TP6800_R34_LINE_START		0x34
#define TP6800_R35_LINE_END_L		0x35
#define TP6800_R36_LINE_END_H		0x36
#define TP6800_R37_FRONT_DARK_ST	0x37
#define TP6800_R38_FRONT_DARK_END	0x38
#define TP6800_R39_REAR_DARK_ST_L	0x39
#define TP6800_R3A_REAR_DARK_ST_H	0x3a
#define TP6800_R3B_REAR_DARK_END_L	0x3b
#define TP6800_R3C_REAR_DARK_END_H	0x3c
#define TP6800_R3D_HORIZ_DARK_LINE_L	0x3d
#define TP6800_R3E_HORIZ_DARK_LINE_H	0x3e
#define TP6800_R3F_FRAME_RATE		0x3f
#define TP6800_R50			0x50
#define TP6800_R51			0x51
#define TP6800_R52			0x52
#define TP6800_R53			0x53
#define TP6800_R54_DARK_CFG		0x54
#define TP6800_R55_GAMMA_R		0x55
#define TP6800_R56_GAMMA_G		0x56
#define TP6800_R57_GAMMA_B		0x57
#define TP6800_R5C_EDGE_THRLD		0x5c
#define TP6800_R5D_DEMOSAIC_CFG		0x5d
#define TP6800_R78_FORMAT		0x78
#define TP6800_R79_QUALITY		0x79
#define TP6800_R7A_BLK_THRLD		0x7a

/* CX0342 register offsets */
#define CX0342_SENSOR_ID		0x00
#define CX0342_VERSION_NO		0x01
#define CX0342_ORG_X_L			0x02
#define CX0342_ORG_X_H			0x03
#define CX0342_ORG_Y_L			0x04
#define CX0342_ORG_Y_H			0x05
#define CX0342_STOP_X_L			0x06
#define CX0342_STOP_X_H			0x07
#define CX0342_STOP_Y_L			0x08
#define CX0342_STOP_Y_H			0x09
#define CX0342_FRAME_WIDTH_L		0x0a
#define CX0342_FRAME_WIDTH_H		0x0b
#define CX0342_FRAME_HEIGH_L		0x0c
#define CX0342_FRAME_HEIGH_H		0x0d
#define CX0342_EXPO_LINE_L		0x10
#define CX0342_EXPO_LINE_H		0x11
#define CX0342_EXPO_CLK_L		0x12
#define CX0342_EXPO_CLK_H		0x13
#define CX0342_RAW_GRGAIN_L		0x14
#define CX0342_RAW_GRGAIN_H		0x15
#define CX0342_RAW_GBGAIN_L		0x16
#define CX0342_RAW_GBGAIN_H		0x17
#define CX0342_RAW_RGAIN_L		0x18
#define CX0342_RAW_RGAIN_H		0x19
#define CX0342_RAW_BGAIN_L		0x1a
#define CX0342_RAW_BGAIN_H		0x1b
#define CX0342_GLOBAL_GAIN		0x1c
#define CX0342_SYS_CTRL_0		0x20
#define CX0342_SYS_CTRL_1		0x21
#define CX0342_SYS_CTRL_2		0x22
#define CX0342_BYPASS_MODE		0x23
#define CX0342_SYS_CTRL_3		0x24
#define CX0342_TIMING_EN		0x25
#define CX0342_OUTPUT_CTRL		0x26
#define CX0342_AUTO_ADC_CALIB		0x27
#define CX0342_SYS_CTRL_4		0x28
#define CX0342_ADCGN			0x30
#define CX0342_SLPCR			0x31
#define CX0342_SLPFN_LO			0x32
#define CX0342_ADC_CTL			0x33
#define CX0342_LVRST_BLBIAS		0x34
#define CX0342_VTHSEL			0x35
#define CX0342_RAMP_RIV			0x36
#define CX0342_LDOSEL			0x37
#define CX0342_CLOCK_GEN		0x40
#define CX0342_SOFT_RESET		0x41
#define CX0342_PLL			0x42
#define CX0342_DR_ENH_PULSE_OFFSET_L	0x43
#define CX0342_DR_ENH_PULSE_OFFSET_H	0x44
#define CX0342_DR_ENH_PULSE_POS_L	0x45
#define CX0342_DR_ENH_PULSE_POS_H	0x46
#define CX0342_DR_ENH_PULSE_WIDTH	0x47
#define CX0342_AS_CURRENT_CNT_L		0x48
#define CX0342_AS_CURRENT_CNT_H		0x49
#define CX0342_AS_PREVIOUS_CNT_L	0x4a
#define CX0342_AS_PREVIOUS_CNT_H	0x4b
#define CX0342_SPV_VALUE_L		0x4c
#define CX0342_SPV_VALUE_H		0x4d
#define CX0342_GPXLTHD_L		0x50
#define CX0342_GPXLTHD_H		0x51
#define CX0342_RBPXLTHD_L		0x52
#define CX0342_RBPXLTHD_H		0x53
#define CX0342_PLANETHD_L		0x54
#define CX0342_PLANETHD_H		0x55
#define CX0342_ROWDARK_TH		0x56
#define CX0342_ROWDARK_TOL		0x57
#define CX0342_RB_GAP_L			0x58
#define CX0342_RB_GAP_H			0x59
#define CX0342_G_GAP_L			0x5a
#define CX0342_G_GAP_H			0x5b
#define CX0342_AUTO_ROW_DARK		0x60
#define CX0342_MANUAL_DARK_VALUE	0x61
#define CX0342_GB_DARK_OFFSET		0x62
#define CX0342_GR_DARK_OFFSET		0x63
#define CX0342_RED_DARK_OFFSET		0x64
#define CX0342_BLUE_DARK_OFFSET		0x65
#define CX0342_DATA_SCALING_MULTI	0x66
#define CX0342_AUTOD_Q_FRAME		0x67
#define CX0342_AUTOD_ALLOW_VARI		0x68
#define CX0342_AUTO_DARK_VALUE_L	0x69
#define CX0342_AUTO_DARK_VALUE_H	0x6a
#define CX0342_IO_CTRL_0		0x70
#define CX0342_IO_CTRL_1		0x71
#define CX0342_IO_CTRL_2		0x72
#define CX0342_IDLE_CTRL		0x73
#define CX0342_TEST_MODE		0x74
#define CX0342_FRAME_FIX_DATA_TEST	0x75
#define CX0342_FRAME_CNT_TEST		0x76
#define CX0342_RST_OVERFLOW_L		0x80
#define CX0342_RST_OVERFLOW_H		0x81
#define CX0342_RST_UNDERFLOW_L		0x82
#define CX0342_RST_UNDERFLOW_H		0x83
#define CX0342_DATA_OVERFLOW_L		0x84
#define CX0342_DATA_OVERFLOW_H		0x85
#define CX0342_DATA_UNDERFLOW_L		0x86
#define CX0342_DATA_UNDERFLOW_H		0x87
#define CX0342_CHANNEL_0_0_L_irst	0x90
#define CX0342_CHANNEL_0_0_H_irst	0x91
#define CX0342_CHANNEL_0_1_L_irst	0x92
#define CX0342_CHANNEL_0_1_H_irst	0x93
#define CX0342_CHANNEL_0_2_L_irst	0x94
#define CX0342_CHANNEL_0_2_H_irst	0x95
#define CX0342_CHANNEL_0_3_L_irst	0x96
#define CX0342_CHANNEL_0_3_H_irst	0x97
#define CX0342_CHANNEL_0_4_L_irst	0x98
#define CX0342_CHANNEL_0_4_H_irst	0x99
#define CX0342_CHANNEL_0_5_L_irst	0x9a
#define CX0342_CHANNEL_0_5_H_irst	0x9b
#define CX0342_CHANNEL_0_6_L_irst	0x9c
#define CX0342_CHANNEL_0_6_H_irst	0x9d
#define CX0342_CHANNEL_0_7_L_irst	0x9e
#define CX0342_CHANNEL_0_7_H_irst	0x9f
#define CX0342_CHANNEL_1_0_L_itx	0xa0
#define CX0342_CHANNEL_1_0_H_itx	0xa1
#define CX0342_CHANNEL_1_1_L_itx	0xa2
#define CX0342_CHANNEL_1_1_H_itx	0xa3
#define CX0342_CHANNEL_1_2_L_itx	0xa4
#define CX0342_CHANNEL_1_2_H_itx	0xa5
#define CX0342_CHANNEL_1_3_L_itx	0xa6
#define CX0342_CHANNEL_1_3_H_itx	0xa7
#define CX0342_CHANNEL_1_4_L_itx	0xa8
#define CX0342_CHANNEL_1_4_H_itx	0xa9
#define CX0342_CHANNEL_1_5_L_itx	0xaa
#define CX0342_CHANNEL_1_5_H_itx	0xab
#define CX0342_CHANNEL_1_6_L_itx	0xac
#define CX0342_CHANNEL_1_6_H_itx	0xad
#define CX0342_CHANNEL_1_7_L_itx	0xae
#define CX0342_CHANNEL_1_7_H_itx	0xaf
#define CX0342_CHANNEL_2_0_L_iwl	0xb0
#define CX0342_CHANNEL_2_0_H_iwl	0xb1
#define CX0342_CHANNEL_2_1_L_iwl	0xb2
#define CX0342_CHANNEL_2_1_H_iwl	0xb3
#define CX0342_CHANNEL_2_2_L_iwl	0xb4
#define CX0342_CHANNEL_2_2_H_iwl	0xb5
#define CX0342_CHANNEL_2_3_L_iwl	0xb6
#define CX0342_CHANNEL_2_3_H_iwl	0xb7
#define CX0342_CHANNEL_2_4_L_iwl	0xb8
#define CX0342_CHANNEL_2_4_H_iwl	0xb9
#define CX0342_CHANNEL_2_5_L_iwl	0xba
#define CX0342_CHANNEL_2_5_H_iwl	0xbb
#define CX0342_CHANNEL_2_6_L_iwl	0xbc
#define CX0342_CHANNEL_2_6_H_iwl	0xbd
#define CX0342_CHANNEL_2_7_L_iwl	0xbe
#define CX0342_CHANNEL_2_7_H_iwl	0xbf
#define CX0342_CHANNEL_3_0_L_ensp	0xc0
#define CX0342_CHANNEL_3_0_H_ensp	0xc1
#define CX0342_CHANNEL_3_1_L_ensp	0xc2
#define CX0342_CHANNEL_3_1_H_ensp	0xc3
#define CX0342_CHANNEL_3_2_L_ensp	0xc4
#define CX0342_CHANNEL_3_2_H_ensp	0xc5
#define CX0342_CHANNEL_3_3_L_ensp	0xc6
#define CX0342_CHANNEL_3_3_H_ensp	0xc7
#define CX0342_CHANNEL_3_4_L_ensp	0xc8
#define CX0342_CHANNEL_3_4_H_ensp	0xc9
#define CX0342_CHANNEL_3_5_L_ensp	0xca
#define CX0342_CHANNEL_3_5_H_ensp	0xcb
#define CX0342_CHANNEL_3_6_L_ensp	0xcc
#define CX0342_CHANNEL_3_6_H_ensp	0xcd
#define CX0342_CHANNEL_3_7_L_ensp	0xce
#define CX0342_CHANNEL_3_7_H_ensp	0xcf
#define CX0342_CHANNEL_4_0_L_sela	0xd0
#define CX0342_CHANNEL_4_0_H_sela	0xd1
#define CX0342_CHANNEL_4_1_L_sela	0xd2
#define CX0342_CHANNEL_4_1_H_sela	0xd3
#define CX0342_CHANNEL_5_0_L_intla	0xe0
#define CX0342_CHANNEL_5_0_H_intla	0xe1
#define CX0342_CHANNEL_5_1_L_intla	0xe2
#define CX0342_CHANNEL_5_1_H_intla	0xe3
#define CX0342_CHANNEL_5_2_L_intla	0xe4
#define CX0342_CHANNEL_5_2_H_intla	0xe5
#define CX0342_CHANNEL_5_3_L_intla	0xe6
#define CX0342_CHANNEL_5_3_H_intla	0xe7
#define CX0342_CHANNEL_6_0_L_xa_sel_pos	0xf0
#define CX0342_CHANNEL_6_0_H_xa_sel_pos	0xf1
#define CX0342_CHANNEL_7_1_L_cds_pos	0xf2
#define CX0342_CHANNEL_7_1_H_cds_pos	0xf3
#define CX0342_SENSOR_HEIGHT_L		0xfb
#define CX0342_SENSOR_HEIGHT_H		0xfc
#define CX0342_SENSOR_WIDTH_L		0xfd
#define CX0342_SENSOR_WIDTH_H		0xfe
#define CX0342_VSYNC_HSYNC_READ		0xff

struct cmd {
	u8 reg;
	u8 val;
};

static const u8 DQT[17][130] = {
/* Define quantization table (thanks to Thomas Kaiser) */
	{ /* Quality 0 */
	 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x01, 0x04, 0x04, 0x04, 0x06, 0x05, 0x06,
	 0x0b, 0x06, 0x06, 0x0b, 0x18, 0x10, 0x0e, 0x10,
	 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18,
	},
	{ /* Quality 1 */
	 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x04, 0x09, 0x09, 0x09, 0x09,
	 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
	 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
	 0x09, 0x01, 0x08, 0x09, 0x09, 0x0c, 0x0a, 0x0c,
	 0x17, 0x0d, 0x0d, 0x17, 0x31, 0x21, 0x1c, 0x21,
	 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
	 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
	 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
	 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
	 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
	 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
	 0x31, 0x31,
	},
	{ /* Quality 2 */
	 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x04, 0x04,
	 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
	 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
	 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
	 0x06, 0x06, 0x06, 0x06, 0x0e, 0x0e, 0x0e, 0x0e,
	 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
	 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
	 0x0e, 0x01, 0x0c, 0x0d, 0x0d, 0x12, 0x0f, 0x12,
	 0x23, 0x13, 0x13, 0x23, 0x4a, 0x31, 0x2a, 0x31,
	 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
	 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
	 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
	 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
	 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
	 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a,
	 0x4a, 0x4a,
	},
	{ /* Quality 3 */
	 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
	 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x04, 0x04,
	 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
	 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
	 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
	 0x08, 0x08, 0x08, 0x08, 0x13, 0x13, 0x13, 0x13,
	 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	 0x13, 0x01, 0x11, 0x12, 0x12, 0x18, 0x15, 0x18,
	 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42,
	 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
	 0x63, 0x63,
	},
	{ /* Quality 4 */
	 0x00, 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
	 0x05, 0x05, 0x05, 0x0a, 0x0a, 0x0a, 0x05, 0x05,
	 0x05, 0x05, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
	 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
	 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
	 0x0a, 0x0a, 0x0a, 0x0a, 0x17, 0x17, 0x17, 0x17,
	 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17,
	 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17,
	 0x17, 0x01, 0x11, 0x16, 0x16, 0x1e, 0x1a, 0x1e,
	 0x3a, 0x20, 0x20, 0x3a, 0x7b, 0x52, 0x46, 0x52,
	 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
	 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
	 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
	 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
	 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
	 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
	 0x7b, 0x7b,
	},
	{ /* Quality 5 */
	 0x00, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
	 0x06, 0x06, 0x06, 0x0c, 0x0c, 0x0c, 0x06, 0x06,
	 0x06, 0x06, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
	 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
	 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
	 0x0c, 0x0c, 0x0c, 0x0c, 0x1c, 0x1c, 0x1c, 0x1c,
	 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
	 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
	 0x1c, 0x01, 0x11, 0x1b, 0x1b, 0x24, 0x1f, 0x24,
	 0x46, 0x27, 0x27, 0x46, 0x94, 0x63, 0x54, 0x63,
	 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
	 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
	 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
	 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
	 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
	 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
	 0x94, 0x94,
	},
	{ /* Quality 6 */
	 0x00, 0x05, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
	 0x07, 0x07, 0x07, 0x0e, 0x0e, 0x0e, 0x07, 0x07,
	 0x07, 0x07, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
	 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
	 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
	 0x0e, 0x0e, 0x0e, 0x0e, 0x21, 0x21, 0x21, 0x21,
	 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
	 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
	 0x21, 0x01, 0x15, 0x1f, 0x1f, 0x2a, 0x24, 0x2a,
	 0x52, 0x2d, 0x2d, 0x52, 0xad, 0x73, 0x62, 0x73,
	 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
	 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
	 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
	 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
	 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
	 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad,
	 0xad, 0xad,
	},
	{ /* Quality 7 */
	 0x00, 0x05, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
	 0x08, 0x08, 0x08, 0x10, 0x10, 0x10, 0x08, 0x08,
	 0x08, 0x08, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	 0x10, 0x10, 0x10, 0x10, 0x26, 0x26, 0x26, 0x26,
	 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,
	 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,
	 0x26, 0x01, 0x15, 0x24, 0x24, 0x30, 0x2a, 0x30,
	 0x5e, 0x34, 0x34, 0x5e, 0xc6, 0x84, 0x70, 0x84,
	 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
	 0xc6, 0xc6,
	},
	{ /* Quality 8 */
	 0x00, 0x06, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
	 0x0a, 0x0a, 0x0a, 0x14, 0x14, 0x14, 0x0a, 0x0a,
	 0x0a, 0x0a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	 0x14, 0x14, 0x14, 0x14, 0x2f, 0x2f, 0x2f, 0x2f,
	 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f,
	 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f,
	 0x2f, 0x01, 0x19, 0x2d, 0x2d, 0x3c, 0x34, 0x3c,
	 0x75, 0x41, 0x41, 0x75, 0xf7, 0xa5, 0x8c, 0xa5,
	 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
	 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
	 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
	 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
	 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
	 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7,
	 0xf7, 0xf7,
	},
	{ /* Quality 9 */
	 0x00, 0x06, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
	 0x0c, 0x0c, 0x0c, 0x18, 0x18, 0x18, 0x0c, 0x0c,
	 0x0c, 0x0c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x18, 0x39, 0x39, 0x39, 0x39,
	 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	 0x39, 0x01, 0x19, 0x36, 0x36, 0x48, 0x3f, 0x48,
	 0x8d, 0x4e, 0x4e, 0x8d, 0xff, 0xc6, 0xa8, 0xc6,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff,
	},
	{ /* Quality 10 */
	 0x00, 0x07, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
	 0x0e, 0x0e, 0x0e, 0x1c, 0x1c, 0x1c, 0x0e, 0x0e,
	 0x0e, 0x0e, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
	 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
	 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c,
	 0x1c, 0x1c, 0x1c, 0x1c, 0x42, 0x42, 0x42, 0x42,
	 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
	 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
	 0x42, 0x01, 0x1d, 0x3f, 0x3f, 0x54, 0x49, 0x54,
	 0xa4, 0x5b, 0x5b, 0xa4, 0xff, 0xe7, 0xc4, 0xe7,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff,
	},
	{ /* Quality 11 */
	 0x00, 0x07, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	 0x10, 0x10, 0x10, 0x20, 0x20, 0x20, 0x10, 0x10,
	 0x10, 0x10, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	 0x20, 0x20, 0x20, 0x20, 0x4c, 0x4c, 0x4c, 0x4c,
	 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c,
	 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c,
	 0x4c, 0x01, 0x1d, 0x48, 0x48, 0x60, 0x54, 0x60,
	 0xbc, 0x68, 0x68, 0xbc, 0xff, 0xff, 0xe0, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff,
	},
	{ /* Quality 12 */
	 0x00, 0x08, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	 0x14, 0x14, 0x14, 0x28, 0x28, 0x28, 0x14, 0x14,
	 0x14, 0x14, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
	 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
	 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
	 0x28, 0x28, 0x28, 0x28, 0x5f, 0x5f, 0x5f, 0x5f,
	 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
	 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
	 0x5f, 0x01, 0x22, 0x5a, 0x5a, 0x78, 0x69, 0x78,
	 0xeb, 0x82, 0x82, 0xeb, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff,
	},
	{ /* Quality 13 */
	 0x00, 0x08, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
	 0x18, 0x18, 0x18, 0x30, 0x30, 0x30, 0x18, 0x18,
	 0x18, 0x18, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
	 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
	 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
	 0x30, 0x30, 0x30, 0x30, 0x72, 0x72, 0x72, 0x72,
	 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
	 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
	 0x72, 0x01, 0x22, 0x6c, 0x6c, 0x90, 0x7e, 0x90,
	 0xff, 0x9c, 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	 0xff, 0xff,
	},
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 14 */ 0x00, 0x0a, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x38, 0x38, 0x38, 0x1c, 0x1c, 0x1c, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x01, 0x2a, 0x7e, 0x7e, 0xa8, 0x93, 0xa8, 0xff, 0xb6, 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 15 */ 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x40, 0x40, 0x40, 0x20, 0x20, 0x20, 0x20, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x01, 0x2a, 0x90, 0x90, 0xc0, 0xa8, 0xc0, 0xff, 0xd0, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 16-31 */ 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x01, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, } }; static const struct cmd tp6810_cx_init_common[] = { {0x1c, 0x00}, {TP6800_R10_SIF_TYPE, 0x00}, {0x4e, 0x00}, {0x4f, 0x00}, {TP6800_R50, 0xff}, {TP6800_R51, 0x03}, {0x00, 0x07}, {TP6800_R79_QUALITY, 0x03}, {TP6800_R2F_TIMING_CFG, 0x37}, {TP6800_R30_SENSOR_CFG, 0x10}, {TP6800_R21_ENDP_1_CTL, 0x00}, {TP6800_R52, 0x40}, {TP6800_R53, 0x40}, {TP6800_R54_DARK_CFG, 0x40}, {TP6800_R30_SENSOR_CFG, 0x18}, {0x4b, 0x00}, {TP6800_R3F_FRAME_RATE, 0x83}, {TP6800_R79_QUALITY, 0x05}, {TP6800_R21_ENDP_1_CTL, 0x00}, {0x7c, 0x04}, {0x25, 0x14}, {0x26, 0x0f}, {0x7b, 0x10}, }; static const struct cmd tp6810_ov_init_common[] = { {0x1c, 0x00}, {TP6800_R10_SIF_TYPE, 0x00}, {0x4e, 0x00}, {0x4f, 0x00}, {TP6800_R50, 0xff}, {TP6800_R51, 0x03}, {0x00, 0x07}, {TP6800_R52, 0x40}, {TP6800_R53, 0x40}, {TP6800_R54_DARK_CFG, 0x40}, {TP6800_R79_QUALITY, 0x03}, {TP6800_R2F_TIMING_CFG, 0x17}, {TP6800_R30_SENSOR_CFG, 0x18}, {TP6800_R21_ENDP_1_CTL, 
							0x00},
	{TP6800_R3F_FRAME_RATE, 0x86},
	{0x25, 0x18},
	{0x26, 0x0f},
	{0x7b, 0x90},
};

static const struct cmd tp6810_bridge_start[] = {
	{0x59, 0x88},
	{0x5a, 0x0f},
	{0x5b, 0x4e},
	{TP6800_R5C_EDGE_THRLD, 0x63},
	{TP6800_R5D_DEMOSAIC_CFG, 0x00},
	{0x03, 0x7f},
	{0x04, 0x80},
	{0x06, 0x00},
	{0x00, 0x00},
};

static const struct cmd tp6810_late_start[] = {
	{0x7d, 0x01},
	{0xb0, 0x04}, {0xb1, 0x04}, {0xb2, 0x04}, {0xb3, 0x04},
	{0xb4, 0x04}, {0xb5, 0x04}, {0xb6, 0x08}, {0xb7, 0x08},
	{0xb8, 0x04}, {0xb9, 0x04}, {0xba, 0x04}, {0xbb, 0x04},
	{0xbc, 0x04}, {0xbd, 0x08}, {0xbe, 0x08}, {0xbf, 0x08},
	{0xc0, 0x04}, {0xc1, 0x04}, {0xc2, 0x08}, {0xc3, 0x08},
	{0xc4, 0x08}, {0xc5, 0x08}, {0xc6, 0x08}, {0xc7, 0x13},
	{0xc8, 0x04}, {0xc9, 0x08}, {0xca, 0x08}, {0xcb, 0x08},
	{0xcc, 0x08}, {0xcd, 0x08}, {0xce, 0x13}, {0xcf, 0x13},
	{0xd0, 0x08}, {0xd1, 0x08}, {0xd2, 0x08}, {0xd3, 0x08},
	{0xd4, 0x08}, {0xd5, 0x13}, {0xd6, 0x13}, {0xd7, 0x13},
	{0xd8, 0x08}, {0xd9, 0x08}, {0xda, 0x08}, {0xdb, 0x08},
	{0xdc, 0x13}, {0xdd, 0x13}, {0xde, 0x13}, {0xdf, 0x13},
	{0xe0, 0x08}, {0xe1, 0x08}, {0xe2, 0x08}, {0xe3, 0x13},
	{0xe4, 0x13}, {0xe5, 0x13}, {0xe6, 0x13}, {0xe7, 0x13},
	{0xe8, 0x08}, {0xe9, 0x08}, {0xea, 0x13}, {0xeb, 0x13},
	{0xec, 0x13}, {0xed, 0x13}, {0xee, 0x13}, {0xef, 0x13},
	{0x7d, 0x02},
	/* later, after isoc start */
	{0x7d, 0x08},
	{0x7d, 0x00},
};

static const struct cmd cx0342_timing_seq[] = {
	{CX0342_CHANNEL_0_1_L_irst, 0x20},
	{CX0342_CHANNEL_0_2_L_irst, 0x24},
	{CX0342_CHANNEL_0_2_H_irst, 0x00},
	{CX0342_CHANNEL_0_3_L_irst, 0x2f},
	{CX0342_CHANNEL_0_3_H_irst, 0x00},
	{CX0342_CHANNEL_1_0_L_itx, 0x02},
	{CX0342_CHANNEL_1_0_H_itx, 0x00},
	{CX0342_CHANNEL_1_1_L_itx, 0x20},
	{CX0342_CHANNEL_1_1_H_itx, 0x00},
	{CX0342_CHANNEL_1_2_L_itx, 0xe4},
	{CX0342_CHANNEL_1_2_H_itx, 0x00},
	{CX0342_CHANNEL_1_3_L_itx, 0xee},
	{CX0342_CHANNEL_1_3_H_itx, 0x00},
	{CX0342_CHANNEL_2_0_L_iwl, 0x30},
	{CX0342_CHANNEL_2_0_H_iwl, 0x00},
	{CX0342_CHANNEL_3_0_L_ensp, 0x34},
	{CX0342_CHANNEL_3_1_L_ensp, 0xe2},
	{CX0342_CHANNEL_3_1_H_ensp, 0x00},
	{CX0342_CHANNEL_3_2_L_ensp, 0xf6},
	{CX0342_CHANNEL_3_2_H_ensp, 0x00},
	{CX0342_CHANNEL_3_3_L_ensp, 0xf4},
	{CX0342_CHANNEL_3_3_H_ensp, 0x02},
	{CX0342_CHANNEL_4_0_L_sela, 0x26},
	{CX0342_CHANNEL_4_0_H_sela, 0x00},
	{CX0342_CHANNEL_4_1_L_sela, 0xe2},
	{CX0342_CHANNEL_4_1_H_sela, 0x00},
	{CX0342_CHANNEL_5_0_L_intla, 0x26},
	{CX0342_CHANNEL_5_1_L_intla, 0x29},
	{CX0342_CHANNEL_5_2_L_intla, 0xf0},
	{CX0342_CHANNEL_5_2_H_intla, 0x00},
	{CX0342_CHANNEL_5_3_L_intla, 0xf3},
	{CX0342_CHANNEL_5_3_H_intla, 0x00},
	{CX0342_CHANNEL_6_0_L_xa_sel_pos, 0x24},
	{CX0342_CHANNEL_7_1_L_cds_pos, 0x02},
	{CX0342_TIMING_EN, 0x01},
};

/* define the JPEG header */
static void jpeg_define(u8 *jpeg_hdr, int height, int width)
{
	memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head);
	jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8;
	jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height;
	jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8;
	jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width;
}

/* set the JPEG quality for sensor soi763a */
static void jpeg_set_qual(u8 *jpeg_hdr, int quality)
{
	int i, sc;

	if (quality <= 0)
		sc = 5000;
	else if (quality < 50)
		sc = 5000 / quality;
	else
		sc = 200 - quality * 2;
	for (i = 0; i < 64; i++) {
		jpeg_hdr[JPEG_QT0_OFFSET + i] =
			(jpeg_head[JPEG_QT0_OFFSET + i] * sc + 50) / 100;
		jpeg_hdr[JPEG_QT1_OFFSET + i] =
			(jpeg_head[JPEG_QT1_OFFSET + i] * sc + 50) / 100;
	}
}
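/*
 * Illustrative sketch, not part of the original driver: jpeg_set_qual()
 * above applies the classic IJG quality-to-scale mapping to the two
 * quantization tables embedded in the JPEG header.  The disabled,
 * self-contained user-space program below shows the same arithmetic on
 * one sample base entry; ijg_scale() and the value 16 are stand-ins
 * chosen for the example only.
 */
#if 0
#include <stdio.h>

static int ijg_scale(int quality)
{
	if (quality <= 0)
		return 5000;
	if (quality < 50)
		return 5000 / quality;
	return 200 - quality * 2;
}

int main(void)
{
	int base = 16;	/* one base quantization-table entry */
	int q;

	/* quality 50 leaves the entry unchanged; lower quality scales up */
	for (q = 10; q <= 90; q += 20)
		printf("quality %2d: scale %4d, entry %d -> %d\n",
		       q, ijg_scale(q), base,
		       (base * ijg_scale(q) + 50) / 100);
	return 0;
}
#endif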
static void reg_w(struct gspca_dev *gspca_dev, u8 index, u8 value)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			0x0e,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, index, NULL, 0, 500);
	if (ret < 0) {
		pr_err("reg_w err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

/* the returned value is in gspca_dev->usb_buf */
static void reg_r(struct gspca_dev *gspca_dev, u8 index)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
			0x0d,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0, index, gspca_dev->usb_buf, 1, 500);
	if (ret < 0) {
		pr_err("reg_r err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

static void reg_w_buf(struct gspca_dev *gspca_dev, const struct cmd *p, int l)
{
	do {
		reg_w(gspca_dev, p->reg, p->val);
		p++;
	} while (--l > 0);
}

static int i2c_w(struct gspca_dev *gspca_dev, u8 index, u8 value)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
	reg_w(gspca_dev, TP6800_R19_SIF_ADDR_S2, index);
	reg_w(gspca_dev, TP6800_R13_SIF_TX_DATA, value);
	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x01);
	if (sd->bridge == BRIDGE_TP6800)
		return 0;
	msleep(5);
	reg_r(gspca_dev, TP6800_R11_SIF_CONTROL);
	if (gspca_dev->usb_buf[0] == 0)
		return 0;
	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
	return -1;			/* error */
}

static void i2c_w_buf(struct gspca_dev *gspca_dev, const struct cmd *p, int l)
{
	do {
		i2c_w(gspca_dev, p->reg, p->val);
		p++;
	} while (--l > 0);
}

static int i2c_r(struct gspca_dev *gspca_dev, u8 index, int len)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int v;

	reg_w(gspca_dev, TP6800_R19_SIF_ADDR_S2, index);
	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x02);
	msleep(5);
	reg_r(gspca_dev, TP6800_R14_SIF_RX_DATA);
	v = gspca_dev->usb_buf[0];
	if (sd->bridge == BRIDGE_TP6800)
		return v;
	if (len > 1) {
		reg_r(gspca_dev, TP6800_R1B_SIF_RX_DATA2);
		v |= (gspca_dev->usb_buf[0] << 8);
	}
	reg_r(gspca_dev, TP6800_R11_SIF_CONTROL);
	if (gspca_dev->usb_buf[0] == 0)
		return v;
	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
	return -1;
}

static void bulk_w(struct gspca_dev *gspca_dev,
			u8 tag,
			const u8 *data,
			int length)
{
	struct usb_device *dev = gspca_dev->dev;
	int count, actual_count, ret;

	if (gspca_dev->usb_err < 0)
		return;
	for (;;) {
		count = length > BULK_OUT_SIZE - 1 ?
				BULK_OUT_SIZE - 1 : length;
		gspca_dev->usb_buf[0] = tag;
		memcpy(&gspca_dev->usb_buf[1], data, count);
		ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 3),
				gspca_dev->usb_buf, count + 1,
				&actual_count, 500);
		if (ret < 0) {
			pr_err("bulk write error %d tag=%02x\n", ret, tag);
			gspca_dev->usb_err = ret;
			return;
		}
		length -= count;
		if (length <= 0)
			break;
		data += count;
	}
}
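/*
 * Illustrative sketch, not part of the original driver: the chunking
 * loop in bulk_w() above, reduced to plain C without USB.  Each
 * transfer carries a one-byte tag followed by at most CHUNK - 1 payload
 * bytes; CHUNK stands in for BULK_OUT_SIZE here.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define CHUNK 8

static void chunked_send(unsigned char tag,
			 const unsigned char *data, int length)
{
	unsigned char buf[CHUNK];
	int count;

	for (;;) {
		count = length > CHUNK - 1 ? CHUNK - 1 : length;
		buf[0] = tag;			/* tag prefixes every chunk */
		memcpy(&buf[1], data, count);
		printf("send %d+1 bytes, tag=%02x\n", count, tag);
		length -= count;
		if (length <= 0)
			break;
		data += count;
	}
}
#endif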
static int probe_6810(struct gspca_dev *gspca_dev)
{
	u8 gpio;
	int ret;

	reg_r(gspca_dev, TP6800_R18_GPIO_DATA);
	gpio = gspca_dev->usb_buf[0];
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x04);	/* i2c 16 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x21);	/* ov??? */
	reg_w(gspca_dev, TP6800_R1A_SIF_TX_DATA2, 0x00);
	if (i2c_w(gspca_dev, 0x00, 0x00) >= 0)
		return SENSOR_SOI763A;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00);	/* i2c 8 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x7f);	/* (unknown i2c) */
	if (i2c_w(gspca_dev, 0x00, 0x00) >= 0)
		return -2;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00);	/* i2c 8 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x11);	/* tas??? / hv??? */
	ret = i2c_r(gspca_dev, 0x00, 1);
	if (ret > 0)
		return -3;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x6e);	/* po??? */
	ret = i2c_r(gspca_dev, 0x00, 1);
	if (ret > 0)
		return -4;
	ret = i2c_r(gspca_dev, 0x01, 1);
	if (ret > 0)
		return -5;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x04);	/* i2c 16 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x5d);	/* mi/mt??? */
	ret = i2c_r(gspca_dev, 0x00, 2);
	if (ret > 0)
		return -6;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x5c);	/* mi/mt??? */
	ret = i2c_r(gspca_dev, 0x36, 2);
	if (ret > 0)
		return -7;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x61);	/* (unknown i2c) */
	reg_w(gspca_dev, TP6800_R1A_SIF_TX_DATA2, 0x10);
	if (i2c_w(gspca_dev, 0xff, 0x00) >= 0)
		return -8;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00);	/* i2c 8 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20);	/* cx0342 */
	ret = i2c_r(gspca_dev, 0x00, 1);
	if (ret > 0)
		return SENSOR_CX0342;
	return -9;
}
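/*
 * Illustrative sketch, not part of the original driver: probe_6810()
 * above identifies the sensor by strobing the GPIO line and trying one
 * candidate I2C address after another, accepting the first one that
 * acknowledges.  The table and helper below are hypothetical names that
 * only mirror that control flow.
 */
#if 0
struct probe_step {
	unsigned char i2c_addr;		/* value for TP6800_R12_SIF_ADDR_S */
	int result;			/* sensor id, or < 0 if unsupported */
};

static int probe_sketch(const struct probe_step *steps, int n,
			int (*answers)(unsigned char i2c_addr))
{
	int i;

	for (i = 0; i < n; i++)
		if (answers(steps[i].i2c_addr))
			return steps[i].result;
	return -1;			/* no candidate acknowledged */
}
#endif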
static void cx0342_6810_init(struct gspca_dev *gspca_dev)
{
	static const struct cmd reg_init_1[] = {
		{TP6800_R2F_TIMING_CFG, 0x2f},
		{0x25, 0x02},
		{TP6800_R21_ENDP_1_CTL, 0x00},
		{TP6800_R3F_FRAME_RATE, 0x80},
		{TP6800_R2F_TIMING_CFG, 0x2f},
		{TP6800_R18_GPIO_DATA, 0xe1},
		{TP6800_R18_GPIO_DATA, 0xc1},
		{TP6800_R18_GPIO_DATA, 0xe1},
		{TP6800_R11_SIF_CONTROL, 0x00},
	};
	static const struct cmd reg_init_2[] = {
		{TP6800_R78_FORMAT, 0x48},
		{TP6800_R11_SIF_CONTROL, 0x00},
	};
	static const struct cmd sensor_init[] = {
		{CX0342_OUTPUT_CTRL, 0x07},
		{CX0342_BYPASS_MODE, 0x58},
		{CX0342_GPXLTHD_L, 0x28},
		{CX0342_RBPXLTHD_L, 0x28},
		{CX0342_PLANETHD_L, 0x50},
		{CX0342_PLANETHD_H, 0x03},
		{CX0342_RB_GAP_L, 0xff},
		{CX0342_RB_GAP_H, 0x07},
		{CX0342_G_GAP_L, 0xff},
		{CX0342_G_GAP_H, 0x07},
		{CX0342_RST_OVERFLOW_L, 0x5c},
		{CX0342_RST_OVERFLOW_H, 0x01},
		{CX0342_DATA_OVERFLOW_L, 0xfc},
		{CX0342_DATA_OVERFLOW_H, 0x03},
		{CX0342_DATA_UNDERFLOW_L, 0x00},
		{CX0342_DATA_UNDERFLOW_H, 0x00},
		{CX0342_SYS_CTRL_0, 0x40},
		{CX0342_GLOBAL_GAIN, 0x01},
		{CX0342_CLOCK_GEN, 0x00},
		{CX0342_SYS_CTRL_0, 0x02},
		{CX0342_IDLE_CTRL, 0x05},
		{CX0342_ADCGN, 0x00},
		{CX0342_ADC_CTL, 0x00},
		{CX0342_LVRST_BLBIAS, 0x01},
		{CX0342_VTHSEL, 0x0b},
		{CX0342_RAMP_RIV, 0x0b},
		{CX0342_LDOSEL, 0x07},
		{CX0342_SPV_VALUE_L, 0x40},
		{CX0342_SPV_VALUE_H, 0x02},
		{CX0342_AUTO_ADC_CALIB, 0x81},
		{CX0342_TIMING_EN, 0x01},
	};

	reg_w_buf(gspca_dev, reg_init_1, ARRAY_SIZE(reg_init_1));
	reg_w_buf(gspca_dev, tp6810_cx_init_common,
			ARRAY_SIZE(tp6810_cx_init_common));
	reg_w_buf(gspca_dev, reg_init_2, ARRAY_SIZE(reg_init_2));
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20);	/* cx0342 I2C addr */
	i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init));
	i2c_w_buf(gspca_dev, cx0342_timing_seq, ARRAY_SIZE(cx0342_timing_seq));
}

static void soi763a_6810_init(struct gspca_dev *gspca_dev)
{
	static const struct cmd reg_init_1[] = {
		{TP6800_R2F_TIMING_CFG, 0x2f},
		{TP6800_R18_GPIO_DATA, 0xe1},
		{0x25, 0x02},
		{TP6800_R21_ENDP_1_CTL, 0x00},
		{TP6800_R3F_FRAME_RATE, 0x80},
		{TP6800_R2F_TIMING_CFG, 0x2f},
		{TP6800_R18_GPIO_DATA, 0xc1},
	};
	static const struct cmd reg_init_2[] = {
		{TP6800_R78_FORMAT, 0x54},
	};
	static const struct cmd sensor_init[] = {
		{0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x03, 0x90},
		{0x04, 0x20}, {0x05, 0x20}, {0x06, 0x80}, {0x07, 0x00},
		{0x08, 0xff}, {0x09, 0xff},
		{0x0a, 0x76},		/* 7630 = soi763a */
		{0x0b, 0x30}, {0x0c, 0x20}, {0x0d, 0x20}, {0x0e, 0xff},
		{0x0f, 0xff}, {0x10, 0x41}, {0x15, 0x14}, {0x11, 0x40},
		{0x12, 0x48}, {0x13, 0x80}, {0x14, 0x80}, {0x16, 0x03},
		{0x28, 0xb0}, {0x71, 0x20}, {0x75, 0x8e}, {0x17, 0x1b},
		{0x18, 0xbd}, {0x19, 0x05}, {0x1a, 0xf6}, {0x1b, 0x04},
		{0x1c, 0x7f},		/* omnivision */
		{0x1d, 0xa2}, {0x1e, 0x00}, {0x1f, 0x00}, {0x20, 0x45},
		{0x21, 0x80}, {0x22, 0x80}, {0x23, 0xee}, {0x24, 0x50},
		{0x25, 0x7a}, {0x26, 0xa0}, {0x27, 0x9a}, {0x29, 0x30},
		{0x2a, 0x80}, {0x2b, 0x00}, {0x2c, 0xac}, {0x2d, 0x05},
		{0x2e, 0x80}, {0x2f, 0x3c}, {0x30, 0x22}, {0x31, 0x00},
		{0x32, 0x86}, {0x33, 0x08}, {0x34, 0xff}, {0x35, 0xff},
		{0x36, 0xff}, {0x37, 0xff}, {0x38, 0xff}, {0x39, 0xff},
		{0x3a, 0xfe}, {0x3b, 0xfe}, {0x3c, 0xfe}, {0x3d, 0xfe},
		{0x3e, 0xfe}, {0x3f, 0x71}, {0x40, 0xff}, {0x41, 0xff},
		{0x42, 0xff}, {0x43, 0xff}, {0x44, 0xff}, {0x45, 0xff},
		{0x46, 0xff}, {0x47, 0xff}, {0x48, 0xff}, {0x49, 0xff},
		{0x4a, 0xfe}, {0x4b, 0xff}, {0x4c, 0x00}, {0x4d, 0x00},
		{0x4e, 0xff}, {0x4f, 0xff}, {0x50, 0xff}, {0x51, 0xff},
		{0x52, 0xff}, {0x53, 0xff}, {0x54, 0xff}, {0x55, 0xff},
		{0x56, 0xff}, {0x57, 0xff}, {0x58, 0xff}, {0x59, 0xff},
		{0x5a, 0xff}, {0x5b, 0xfe}, {0x5c, 0xff}, {0x5d, 0x8f},
		{0x5e, 0xff}, {0x5f, 0x8f}, {0x60, 0xa2}, {0x61, 0x4a},
		{0x62, 0xf3}, {0x63, 0x75}, {0x64, 0xf0}, {0x65, 0x00},
		{0x66, 0x55}, {0x67, 0x92}, {0x68, 0xa0}, {0x69, 0x4a},
		{0x6a, 0x22}, {0x6b, 0x00}, {0x6c, 0x33}, {0x6d, 0x44},
		{0x6e, 0x22}, {0x6f, 0x84}, {0x70, 0x0b}, {0x72, 0x10},
		{0x73, 0x50}, {0x74, 0x21}, {0x76, 0x00}, {0x77, 0xa5},
		{0x78, 0x80}, {0x79, 0x80}, {0x7a, 0x80}, {0x7b, 0xe2},
		{0x7c, 0x00}, {0x7d, 0xf7}, {0x7e, 0x00}, {0x7f, 0x00},
	};

	reg_w_buf(gspca_dev, reg_init_1, ARRAY_SIZE(reg_init_1));
	reg_w_buf(gspca_dev, tp6810_ov_init_common,
			ARRAY_SIZE(tp6810_ov_init_common));
	reg_w_buf(gspca_dev, reg_init_2, ARRAY_SIZE(reg_init_2));
	i2c_w(gspca_dev, 0x12, 0x80);		/* sensor reset */
	msleep(10);
	i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init));
}

/* set the gain and exposure */
static void setexposure(struct gspca_dev *gspca_dev, s32 expo,
			s32 gain, s32 blue, s32 red)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->sensor == SENSOR_CX0342) {
		expo = (expo << 2) - 1;
		i2c_w(gspca_dev, CX0342_EXPO_LINE_L, expo);
		i2c_w(gspca_dev, CX0342_EXPO_LINE_H, expo >> 8);
		if (sd->bridge == BRIDGE_TP6800)
			i2c_w(gspca_dev, CX0342_RAW_GBGAIN_H, gain >> 8);
		i2c_w(gspca_dev, CX0342_RAW_GBGAIN_L, gain);
		if (sd->bridge == BRIDGE_TP6800)
			i2c_w(gspca_dev,
CX0342_RAW_GRGAIN_H, gain >> 8); i2c_w(gspca_dev, CX0342_RAW_GRGAIN_L, gain); if (sd->sensor == SENSOR_CX0342) { if (sd->bridge == BRIDGE_TP6800) i2c_w(gspca_dev, CX0342_RAW_BGAIN_H, blue >> 8); i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, blue); if (sd->bridge == BRIDGE_TP6800) i2c_w(gspca_dev, CX0342_RAW_RGAIN_H, red >> 8); i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, red); } i2c_w(gspca_dev, CX0342_SYS_CTRL_0, sd->bridge == BRIDGE_TP6800 ? 0x80 : 0x81); return; } /* soi763a */ i2c_w(gspca_dev, 0x10, /* AEC_H (exposure time) */ expo); /* i2c_w(gspca_dev, 0x76, 0x02); * AEC_L ([1:0] */ i2c_w(gspca_dev, 0x00, /* gain */ gain); } /* set the JPEG quantization tables */ static void set_dqt(struct gspca_dev *gspca_dev, u8 q) { struct sd *sd = (struct sd *) gspca_dev; /* update the jpeg quantization tables */ gspca_dbg(gspca_dev, D_STREAM, "q %d -> %d\n", sd->quality, q); sd->quality = q; if (q > 16) q = 16; if (sd->sensor == SENSOR_SOI763A) jpeg_set_qual(sd->jpeg_hdr, jpeg_q[q]); else memcpy(&sd->jpeg_hdr[JPEG_QT0_OFFSET - 1], DQT[q], sizeof DQT[0]); } /* set the JPEG compression quality factor */ static void setquality(struct gspca_dev *gspca_dev, s32 q) { struct sd *sd = (struct sd *) gspca_dev; if (q != 16) q = 15 - q; reg_w(gspca_dev, TP6800_R7A_BLK_THRLD, 0x00); reg_w(gspca_dev, TP6800_R79_QUALITY, 0x04); reg_w(gspca_dev, TP6800_R79_QUALITY, q); /* auto quality */ if (q == 15 && sd->bridge == BRIDGE_TP6810) { msleep(4); reg_w(gspca_dev, TP6800_R7A_BLK_THRLD, 0x19); } } static const u8 color_null[18] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 color_gain[NSENSORS][18] = { [SENSOR_CX0342] = {0x4c, 0x00, 0xa9, 0x00, 0x31, 0x00, /* Y R/G/B (LE values) */ 0xb6, 0x03, 0x6c, 0x03, 0xe0, 0x00, /* U R/G/B */ 0xdf, 0x00, 0x46, 0x03, 0xdc, 0x03}, /* V R/G/B */ [SENSOR_SOI763A] = {0x4c, 0x00, 0x95, 0x00, 0x1d, 0x00, /* Y R/G/B (LE values) */ 0xb6, 0x03, 0x6c, 0x03, 0xd7, 0x00, /* U R/G/B */ 0xd5, 0x00, 0x46, 0x03, 0xdc, 0x03}, /* V R/G/B */ }; static void setgamma(struct gspca_dev *gspca_dev, s32 gamma) { struct sd *sd = (struct sd *) gspca_dev; #define NGAMMA 6 static const u8 gamma_tb[NGAMMA][3][1024] = { { /* gamma 0 - from tp6800 + soi763a */ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09, 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11, 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 
0x60, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 
0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09, 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11, 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 
0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 
0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09, 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11, 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x76, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 
0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb} }, { /* gamma 1 - from tp6810 + soi763a */ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x07, 0x08, 0x09, 0x0a, 0x0c, 0x0d, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x22, 0x23, 0x25, 0x26, 0x27, 0x27, 0x28, 0x29, 0x2b, 0x2b, 0x2c, 0x2d, 0x2f, 0x2f, 0x30, 0x31, 0x33, 0x33, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4a, 
0x4b, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x0a, 0x0c, 0x0d, 0x0e, 0x10, 0x10, 0x11, 0x12, 0x14, 0x15, 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x25, 0x25, 0x26, 0x27, 0x27, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x34, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 
0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x05, 0x07, 0x08, 0x09, 0x0a, 0x0a, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11, 0x12, 0x12, 0x14, 0x15, 0x16, 0x16, 0x17, 0x18, 0x18, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x22, 0x22, 0x23, 0x23, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x29, 0x29, 0x2b, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 
0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }, { /* gamma 2 */ {0x00, 0x01, 0x02, 0x05, 0x07, 0x08, 0x0a, 0x0c, 0x0d, 0x0e, 0x10, 0x12, 0x14, 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x34, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x44, 0x44, 0x45, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 
0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x07, 0x08, 0x09, 0x0a, 0x0d, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15, 0x16, 0x16, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x25, 0x26, 0x26, 0x27, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x30, 0x31, 0x33, 0x33, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 
0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 
0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x00, 0x00, 0x00, 0x01, 0x02, 0x05, 0x07, 0x08, 0x09, 0x0a, 0x0c, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x25, 0x26, 0x27, 0x28, 0x28, 0x29, 0x2b, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x31, 0x31, 0x33, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x47, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 
0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb} }, { /* gamma 3 - from tp6810 + cx0342 */ {0x08, 0x09, 0x0c, 0x0d, 0x10, 0x11, 0x14, 0x15, 0x17, 0x18, 0x1a, 0x1c, 0x1e, 0x1f, 0x20, 0x23, 0x25, 0x26, 0x27, 0x28, 0x2b, 0x2c, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x50, 0x52, 0x53, 0x53, 0x54, 0x55, 0x56, 0x56, 0x58, 0x59, 0x5a, 0x5a, 0x5b, 0x5c, 0x5c, 0x5e, 0x5f, 0x5f, 0x60, 0x61, 0x61, 0x62, 0x63, 0x63, 0x65, 0x66, 0x66, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 
0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x03, 0x05, 0x07, 0x09, 0x0a, 0x0c, 0x0d, 0x10, 0x11, 0x12, 0x14, 0x15, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x23, 0x25, 0x26, 0x27, 
0x28, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x33, 0x34, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 
0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x07, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17, 0x18, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x43, 0x44, 0x44, 0x45, 0x47, 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x54, 0x55, 0x55, 0x56, 0x58, 0x58, 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x61, 0x61, 0x62, 0x63, 0x63, 0x65, 0x65, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 
0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }, { /* gamma 4 - from tp6800 + soi763a */ {0x11, 0x14, 0x15, 0x17, 0x1a, 0x1b, 0x1e, 0x1f, 0x22, 0x23, 0x25, 0x27, 0x28, 0x2b, 0x2c, 0x2d, 0x2f, 0x31, 0x33, 0x34, 0x35, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x40, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4f, 0x50, 0x52, 0x53, 0x53, 0x54, 0x55, 0x56, 0x58, 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5f, 0x60, 0x61, 0x61, 0x62, 0x63, 0x65, 0x65, 0x66, 0x67, 0x68, 0x68, 0x69, 0x6a, 0x6c, 0x6c, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x71, 0x71, 0x73, 0x74, 0x74, 0x75, 0x77, 0x77, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7c, 0x7c, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x96, 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9d, 0x9d, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 
0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb,
0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x11, 0x14, 0x15, 0x16, 0x17, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2c, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x44, 0x45, 0x45, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4d, 0x4f, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x58, 0x58, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x63, 0x63, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7,
0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x0d, 0x10, 0x11, 0x14, 0x15, 0x17, 0x18, 0x1b, 0x1c, 0x1e, 0x20, 0x22, 0x23, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4d, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x54, 0x55, 0x56, 0x56, 0x58, 0x59, 0x5a, 0x5a, 0x5b, 0x5c, 0x5e, 0x5e, 0x5f, 0x60, 0x60, 
0x61, 0x62, 0x62, 0x63, 0x65, 0x65, 0x66, 0x67, 0x67, 0x68, 0x69, 0x69, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 
0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb} }, { /* gamma 5 */ {0x16, 0x18, 0x19, 0x1b, 0x1d, 0x1e, 0x20, 0x21, 0x23, 0x24, 0x25, 0x27, 0x28, 0x2a, 0x2b, 0x2c, 0x2d, 0x2f, 0x30, 0x31, 0x32, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x56, 0x57, 0x58, 0x59, 0x59, 0x5a, 0x5b, 0x5c, 0x5c, 0x5d, 0x5e, 0x5f, 0x5f, 0x60, 0x61, 0x62, 0x62, 0x63, 0x64, 0x64, 0x65, 0x66, 0x66, 0x67, 0x68, 0x68, 0x69, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c, 0x6d, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x72, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7d, 0x7d, 0x7e, 0x7e, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x87, 0x87, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x94, 0x95, 0x95, 0x96, 0x96, 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9f, 0x9f, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa8, 0xa8, 
0xa8, 0xa9, 0xa9, 0xa9, 0xaa, 0xaa, 0xaa, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc1, 0xc1, 0xc1, 0xc1, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea, 0xea, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x0f, 0x11, 0x12, 0x14, 0x15, 0x16, 0x18, 0x19, 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3e, 0x3f, 0x3f, 0x40, 0x41, 0x42, 0x42, 0x43, 0x44, 0x44, 0x45, 0x46, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4e, 0x4e, 0x4f, 0x50, 0x50, 0x51, 0x51, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x57, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5d, 0x5d, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x64, 0x64, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x71, 0x72, 0x72, 0x73, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x75, 0x76, 0x76, 0x76, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x83, 0x83, 0x83, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8c, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x95, 0x95, 0x95, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9f, 0x9f, 0x9f, 0x9f, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 
0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5, 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc1, 0xc1, 0xc1, 0xc1, 0xc1, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea, 0xea, 0xea, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x13, 0x15, 0x16, 0x18, 0x19, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x23, 0x24, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4e, 0x4f, 0x50, 0x50, 0x51, 0x52, 0x53, 0x53, 0x54, 0x55, 0x55, 0x56, 0x57, 0x57, 0x58, 0x59, 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5d, 0x5d, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x62, 0x62, 0x63, 0x63, 0x64, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x72, 0x72, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x76, 0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x86, 0x87, 0x87, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8c, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x95, 0x95, 0x95, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9f, 0x9f, 0x9f, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xaa, 0xaa, 0xaa, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc1, 0xc1, 0xc1, 0xc1, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 
0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea, 0xea, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, }, }; reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00); if (sd->bridge == BRIDGE_TP6810) reg_w(gspca_dev, 0x02, 0x28); /* msleep(50); */ bulk_w(gspca_dev, 0x00, gamma_tb[gamma][0], 1024); bulk_w(gspca_dev, 0x01, gamma_tb[gamma][1], 1024); bulk_w(gspca_dev, 0x02, gamma_tb[gamma][2], 1024); if (sd->bridge == BRIDGE_TP6810) { int i; reg_w(gspca_dev, 0x02, 0x2b); reg_w(gspca_dev, 0x02, 0x28); for (i = 0; i < 6; i++) reg_w(gspca_dev, TP6800_R55_GAMMA_R, gamma_tb[gamma][0][i]); reg_w(gspca_dev, 0x02, 0x2b); reg_w(gspca_dev, 0x02, 0x28); for (i = 0; i < 6; i++) reg_w(gspca_dev, TP6800_R56_GAMMA_G, gamma_tb[gamma][1][i]); reg_w(gspca_dev, 0x02, 0x2b); reg_w(gspca_dev, 
0x02, 0x28); for (i = 0; i < 6; i++) reg_w(gspca_dev, TP6800_R57_GAMMA_B, gamma_tb[gamma][2][i]); reg_w(gspca_dev, 0x02, 0x28); } reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03); /* msleep(50); */ } static void setsharpness(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->bridge == BRIDGE_TP6800) { val |= 0x08; /* grid compensation enable */ if (gspca_dev->pixfmt.width == 640) reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ else val |= 0x04; /* scaling down enable */ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, val); } else { val = (val << 5) | 0x08; reg_w(gspca_dev, 0x59, val); } } static void setautogain(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->ag_cnt = val ? AG_CNT_START : -1; } /* set the resolution for sensor cx0342 */ static void set_resolution(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00); if (gspca_dev->pixfmt.width == 320) { reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06); msleep(100); i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01); msleep(100); reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, 0x0d); i2c_w(gspca_dev, CX0342_EXPO_LINE_L, 0x37); i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x01); } else { reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x05); msleep(100); i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01); msleep(100); reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, 0x09); i2c_w(gspca_dev, CX0342_EXPO_LINE_L, 0xcf); i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x00); } i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x01); bulk_w(gspca_dev, 0x03, color_gain[SENSOR_CX0342], ARRAY_SIZE(color_gain[0])); setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); if (sd->sensor == SENSOR_SOI763A) setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); } /* convert the frame rate to a tp68x0 value */ static int get_fr_idx(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; if (sd->bridge == BRIDGE_TP6800) { for (i = 0; i < ARRAY_SIZE(rates) - 1; i++) { if (sd->framerate >= rates[i]) break; } i = 6 - i; /* 1 = 5fps .. 6 = 30fps */ /* 640x480 * 30 fps does not work */ if (i == 6 /* if 30 fps */ && gspca_dev->pixfmt.width == 640) i = 0x05; /* 15 fps */ } else { for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) { if (sd->framerate >= rates_6810[i]) break; } i = 7 - i; /* 3 = 5fps .. 7 = 30fps */ /* 640x480 * 30 fps does not work */ if (i == 7 /* if 30 fps */ && gspca_dev->pixfmt.width == 640) i = 6; /* 15 fps */ i |= 0x80; /* clock * 1 */ } return i; } static void setframerate(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 fr_idx; fr_idx = get_fr_idx(gspca_dev); if (sd->bridge == BRIDGE_TP6810) { reg_r(gspca_dev, 0x7b); reg_w(gspca_dev, 0x7b, sd->sensor == SENSOR_CX0342 ? 
0x10 : 0x90); if (val >= 128) fr_idx = 0xf0; /* lower frame rate */ } reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, fr_idx); if (sd->sensor == SENSOR_CX0342) i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01); } static void setrgain(struct gspca_dev *gspca_dev, s32 rgain) { i2c_w(gspca_dev, CX0342_RAW_RGAIN_H, rgain >> 8); i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, rgain); i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x80); } static int sd_setgain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; s32 val = gspca_dev->gain->val; if (sd->sensor == SENSOR_CX0342) { s32 old = gspca_dev->gain->cur.val ? gspca_dev->gain->cur.val : 1; sd->blue->val = sd->blue->val * val / old; if (sd->blue->val > 4095) sd->blue->val = 4095; sd->red->val = sd->red->val * val / old; if (sd->red->val > 4095) sd->red->val = 4095; } if (gspca_dev->streaming) { if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, gspca_dev->exposure->val, gspca_dev->gain->val, sd->blue->val, sd->red->val); else setexposure(gspca_dev, gspca_dev->exposure->val, gspca_dev->gain->val, 0, 0); } return gspca_dev->usb_err; } static void setbgain(struct gspca_dev *gspca_dev, s32 bgain) { i2c_w(gspca_dev, CX0342_RAW_BGAIN_H, bgain >> 8); i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, bgain); i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x80); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; sd->bridge = id->driver_info; gspca_dev->cam.cam_mode = vga_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); gspca_dev->cam.mode_framerates = sd->bridge == BRIDGE_TP6800 ? framerates : framerates_6810; sd->framerate = DEFAULT_FRAME_RATE; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd tp6800_preinit[] = { {TP6800_R10_SIF_TYPE, 0x01}, /* sif */ {TP6800_R11_SIF_CONTROL, 0x01}, {TP6800_R15_GPIO_PU, 0x9f}, {TP6800_R16_GPIO_PD, 0x9f}, {TP6800_R17_GPIO_IO, 0x80}, {TP6800_R18_GPIO_DATA, 0x40}, /* LED off */ }; static const struct cmd tp6810_preinit[] = { {TP6800_R2F_TIMING_CFG, 0x2f}, {TP6800_R15_GPIO_PU, 0x6f}, {TP6800_R16_GPIO_PD, 0x40}, {TP6800_R17_GPIO_IO, 0x9f}, {TP6800_R18_GPIO_DATA, 0xc1}, /* LED off */ }; if (sd->bridge == BRIDGE_TP6800) reg_w_buf(gspca_dev, tp6800_preinit, ARRAY_SIZE(tp6800_preinit)); else reg_w_buf(gspca_dev, tp6810_preinit, ARRAY_SIZE(tp6810_preinit)); msleep(15); reg_r(gspca_dev, TP6800_R18_GPIO_DATA); gspca_dbg(gspca_dev, D_PROBE, "gpio: %02x\n", gspca_dev->usb_buf[0]); /* values: * 0x80: snapshot button * 0x40: LED * 0x20: (bridge / sensor) reset for tp6810 ? * 0x07: sensor type ? 
*/ /* guess the sensor type */ if (force_sensor >= 0) { sd->sensor = force_sensor; } else { if (sd->bridge == BRIDGE_TP6800) { /*fixme: not sure this is working*/ switch (gspca_dev->usb_buf[0] & 0x07) { case 0: sd->sensor = SENSOR_SOI763A; break; case 1: sd->sensor = SENSOR_CX0342; break; } } else { int sensor; sensor = probe_6810(gspca_dev); if (sensor < 0) { pr_warn("Unknown sensor %d - forced to soi763a\n", -sensor); sensor = SENSOR_SOI763A; } sd->sensor = sensor; } } if (sd->sensor == SENSOR_SOI763A) { pr_info("Sensor soi763a\n"); if (sd->bridge == BRIDGE_TP6810) { soi763a_6810_init(gspca_dev); } } else { pr_info("Sensor cx0342\n"); if (sd->bridge == BRIDGE_TP6810) { cx0342_6810_init(gspca_dev); } } set_dqt(gspca_dev, 0); return 0; } /* This function is called before choosing the alt setting */ static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd cx_sensor_init[] = { {CX0342_AUTO_ADC_CALIB, 0x81}, {CX0342_EXPO_LINE_L, 0x37}, {CX0342_EXPO_LINE_H, 0x01}, {CX0342_RAW_GRGAIN_L, 0x00}, {CX0342_RAW_GBGAIN_L, 0x00}, {CX0342_RAW_RGAIN_L, 0x00}, {CX0342_RAW_BGAIN_L, 0x00}, {CX0342_SYS_CTRL_0, 0x81}, }; static const struct cmd cx_bridge_init[] = { {0x4d, 0x00}, {0x4c, 0xff}, {0x4e, 0xff}, {0x4f, 0x00}, }; static const struct cmd ov_sensor_init[] = { {0x10, 0x75}, /* exposure */ {0x76, 0x03}, {0x00, 0x00}, /* gain */ }; static const struct cmd ov_bridge_init[] = { {0x7b, 0x90}, {TP6800_R3F_FRAME_RATE, 0x87}, }; if (sd->bridge == BRIDGE_TP6800) return 0; if (sd->sensor == SENSOR_CX0342) { reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20); reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x87); i2c_w_buf(gspca_dev, cx_sensor_init, ARRAY_SIZE(cx_sensor_init)); reg_w_buf(gspca_dev, cx_bridge_init, ARRAY_SIZE(cx_bridge_init)); bulk_w(gspca_dev, 0x03, color_null, sizeof color_null); reg_w(gspca_dev, 0x59, 0x40); } else { reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x21); i2c_w_buf(gspca_dev, ov_sensor_init, ARRAY_SIZE(ov_sensor_init)); reg_r(gspca_dev, 0x7b); reg_w_buf(gspca_dev, ov_bridge_init, ARRAY_SIZE(ov_bridge_init)); } reg_w(gspca_dev, TP6800_R78_FORMAT, gspca_dev->curr_mode ? 0x00 : 0x01); return gspca_dev->usb_err; } static void set_led(struct gspca_dev *gspca_dev, int on) { u8 data; reg_r(gspca_dev, TP6800_R18_GPIO_DATA); data = gspca_dev->usb_buf[0]; if (on) data &= ~0x40; else data |= 0x40; reg_w(gspca_dev, TP6800_R18_GPIO_DATA, data); } static void cx0342_6800_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd reg_init[] = { /* fixme: is this useful? 
*/ {TP6800_R17_GPIO_IO, 0x9f}, {TP6800_R16_GPIO_PD, 0x40}, {TP6800_R10_SIF_TYPE, 0x00}, /* i2c 8 bits */ {TP6800_R50, 0x00}, {TP6800_R51, 0x00}, {TP6800_R52, 0xff}, {TP6800_R53, 0x03}, {TP6800_R54_DARK_CFG, 0x07}, {TP6800_R5C_EDGE_THRLD, 0x40}, {TP6800_R7A_BLK_THRLD, 0x40}, {TP6800_R2F_TIMING_CFG, 0x17}, {TP6800_R30_SENSOR_CFG, 0x18}, /* G1B..RG0 */ {TP6800_R37_FRONT_DARK_ST, 0x00}, {TP6800_R38_FRONT_DARK_END, 0x00}, {TP6800_R39_REAR_DARK_ST_L, 0x00}, {TP6800_R3A_REAR_DARK_ST_H, 0x00}, {TP6800_R3B_REAR_DARK_END_L, 0x00}, {TP6800_R3C_REAR_DARK_END_H, 0x00}, {TP6800_R3D_HORIZ_DARK_LINE_L, 0x00}, {TP6800_R3E_HORIZ_DARK_LINE_H, 0x00}, {TP6800_R21_ENDP_1_CTL, 0x03}, {TP6800_R31_PIXEL_START, 0x0b}, {TP6800_R32_PIXEL_END_L, 0x8a}, {TP6800_R33_PIXEL_END_H, 0x02}, {TP6800_R34_LINE_START, 0x0e}, {TP6800_R35_LINE_END_L, 0xf4}, {TP6800_R36_LINE_END_H, 0x01}, {TP6800_R78_FORMAT, 0x00}, {TP6800_R12_SIF_ADDR_S, 0x20}, /* cx0342 i2c addr */ }; static const struct cmd sensor_init[] = { {CX0342_OUTPUT_CTRL, 0x07}, {CX0342_BYPASS_MODE, 0x58}, {CX0342_GPXLTHD_L, 0x16}, {CX0342_RBPXLTHD_L, 0x16}, {CX0342_PLANETHD_L, 0xc0}, {CX0342_PLANETHD_H, 0x03}, {CX0342_RB_GAP_L, 0xff}, {CX0342_RB_GAP_H, 0x07}, {CX0342_G_GAP_L, 0xff}, {CX0342_G_GAP_H, 0x07}, {CX0342_RST_OVERFLOW_L, 0x5c}, {CX0342_RST_OVERFLOW_H, 0x01}, {CX0342_DATA_OVERFLOW_L, 0xfc}, {CX0342_DATA_OVERFLOW_H, 0x03}, {CX0342_DATA_UNDERFLOW_L, 0x00}, {CX0342_DATA_UNDERFLOW_H, 0x00}, {CX0342_SYS_CTRL_0, 0x40}, {CX0342_GLOBAL_GAIN, 0x01}, {CX0342_CLOCK_GEN, 0x00}, {CX0342_SYS_CTRL_0, 0x02}, {CX0342_IDLE_CTRL, 0x05}, {CX0342_ADCGN, 0x00}, {CX0342_ADC_CTL, 0x00}, {CX0342_LVRST_BLBIAS, 0x01}, {CX0342_VTHSEL, 0x0b}, {CX0342_RAMP_RIV, 0x0b}, {CX0342_LDOSEL, 0x07}, {CX0342_SPV_VALUE_L, 0x40}, {CX0342_SPV_VALUE_H, 0x02}, }; reg_w_buf(gspca_dev, reg_init, ARRAY_SIZE(reg_init)); i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init)); i2c_w_buf(gspca_dev, cx0342_timing_seq, ARRAY_SIZE(cx0342_timing_seq)); reg_w(gspca_dev, TP6800_R5C_EDGE_THRLD, 0x10); reg_w(gspca_dev, TP6800_R54_DARK_CFG, 0x00); i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x00); i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x01); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); set_led(gspca_dev, 1); set_resolution(gspca_dev); } static void cx0342_6810_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd sensor_init_2[] = { {CX0342_EXPO_LINE_L, 0x6f}, {CX0342_EXPO_LINE_H, 0x02}, {CX0342_RAW_GRGAIN_L, 0x00}, {CX0342_RAW_GBGAIN_L, 0x00}, {CX0342_RAW_RGAIN_L, 0x00}, {CX0342_RAW_BGAIN_L, 0x00}, {CX0342_SYS_CTRL_0, 0x81}, }; static const struct cmd bridge_init_2[] = { {0x4d, 0x00}, {0x4c, 0xff}, {0x4e, 0xff}, {0x4f, 0x00}, {TP6800_R7A_BLK_THRLD, 0x00}, {TP6800_R79_QUALITY, 0x04}, {TP6800_R79_QUALITY, 0x01}, }; static const struct cmd bridge_init_3[] = { {TP6800_R31_PIXEL_START, 0x08}, {TP6800_R32_PIXEL_END_L, 0x87}, {TP6800_R33_PIXEL_END_H, 0x02}, {TP6800_R34_LINE_START, 0x0e}, {TP6800_R35_LINE_END_L, 0xf4}, {TP6800_R36_LINE_END_H, 0x01}, }; static const struct cmd sensor_init_3[] = { {CX0342_AUTO_ADC_CALIB, 0x81}, {CX0342_EXPO_LINE_L, 0x6f}, {CX0342_EXPO_LINE_H, 0x02}, {CX0342_RAW_GRGAIN_L, 0x00}, {CX0342_RAW_GBGAIN_L, 0x00}, {CX0342_RAW_RGAIN_L, 0x00}, {CX0342_RAW_BGAIN_L, 0x00}, {CX0342_SYS_CTRL_0, 0x81}, }; static const struct 
cmd bridge_init_5[] = { {0x4d, 0x00}, {0x4c, 0xff}, {0x4e, 0xff}, {0x4f, 0x00}, }; static const struct cmd sensor_init_4[] = { {CX0342_EXPO_LINE_L, 0xd3}, {CX0342_EXPO_LINE_H, 0x01}, /*fixme: gains, but 00..80 only*/ {CX0342_RAW_GRGAIN_L, 0x40}, {CX0342_RAW_GBGAIN_L, 0x40}, {CX0342_RAW_RGAIN_L, 0x40}, {CX0342_RAW_BGAIN_L, 0x40}, {CX0342_SYS_CTRL_0, 0x81}, }; static const struct cmd sensor_init_5[] = { {CX0342_IDLE_CTRL, 0x05}, {CX0342_ADCGN, 0x00}, {CX0342_ADC_CTL, 0x00}, {CX0342_LVRST_BLBIAS, 0x01}, {CX0342_VTHSEL, 0x0b}, {CX0342_RAMP_RIV, 0x0b}, {CX0342_LDOSEL, 0x07}, {CX0342_SPV_VALUE_L, 0x40}, {CX0342_SPV_VALUE_H, 0x02}, {CX0342_AUTO_ADC_CALIB, 0x81}, }; reg_w(gspca_dev, 0x22, gspca_dev->alt); i2c_w_buf(gspca_dev, sensor_init_2, ARRAY_SIZE(sensor_init_2)); reg_w_buf(gspca_dev, bridge_init_2, ARRAY_SIZE(bridge_init_2)); reg_w_buf(gspca_dev, tp6810_cx_init_common, ARRAY_SIZE(tp6810_cx_init_common)); reg_w_buf(gspca_dev, bridge_init_3, ARRAY_SIZE(bridge_init_3)); if (gspca_dev->curr_mode) { reg_w(gspca_dev, 0x4a, 0x7f); reg_w(gspca_dev, 0x07, 0x05); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ } else { reg_w(gspca_dev, 0x4a, 0xff); reg_w(gspca_dev, 0x07, 0x85); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */ } setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); reg_w_buf(gspca_dev, tp6810_bridge_start, ARRAY_SIZE(tp6810_bridge_start)); setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness)); bulk_w(gspca_dev, 0x03, color_gain[SENSOR_CX0342], ARRAY_SIZE(color_gain[0])); reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x87); i2c_w_buf(gspca_dev, sensor_init_3, ARRAY_SIZE(sensor_init_3)); reg_w_buf(gspca_dev, bridge_init_5, ARRAY_SIZE(bridge_init_5)); i2c_w_buf(gspca_dev, sensor_init_4, ARRAY_SIZE(sensor_init_4)); reg_w_buf(gspca_dev, bridge_init_5, ARRAY_SIZE(bridge_init_5)); i2c_w_buf(gspca_dev, sensor_init_5, ARRAY_SIZE(sensor_init_5)); set_led(gspca_dev, 1); /* setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); */ } static void soi763a_6800_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd reg_init[] = { {TP6800_R79_QUALITY, 0x04}, {TP6800_R79_QUALITY, 0x01}, {TP6800_R10_SIF_TYPE, 0x00}, /* i2c 8 bits */ {TP6800_R50, 0x00}, {TP6800_R51, 0x00}, {TP6800_R52, 0xff}, {TP6800_R53, 0x03}, {TP6800_R54_DARK_CFG, 0x07}, {TP6800_R5C_EDGE_THRLD, 0x40}, {TP6800_R79_QUALITY, 0x03}, {TP6800_R7A_BLK_THRLD, 0x40}, {TP6800_R2F_TIMING_CFG, 0x46}, {TP6800_R30_SENSOR_CFG, 0x10}, /* BG1..G0R */ {TP6800_R37_FRONT_DARK_ST, 0x00}, {TP6800_R38_FRONT_DARK_END, 0x00}, {TP6800_R39_REAR_DARK_ST_L, 0x00}, {TP6800_R3A_REAR_DARK_ST_H, 0x00}, {TP6800_R3B_REAR_DARK_END_L, 0x00}, {TP6800_R3C_REAR_DARK_END_H, 0x00}, {TP6800_R3D_HORIZ_DARK_LINE_L, 0x00}, {TP6800_R3E_HORIZ_DARK_LINE_H, 0x00}, {TP6800_R21_ENDP_1_CTL, 0x03}, {TP6800_R3F_FRAME_RATE, 0x04}, /* 15 fps */ {TP6800_R5D_DEMOSAIC_CFG, 0x0e}, /* scale down - medium edge */ {TP6800_R31_PIXEL_START, 0x1b}, {TP6800_R32_PIXEL_END_L, 0x9a}, {TP6800_R33_PIXEL_END_H, 0x02}, {TP6800_R34_LINE_START, 0x0f}, {TP6800_R35_LINE_END_L, 0xf4}, {TP6800_R36_LINE_END_H, 0x01}, {TP6800_R78_FORMAT, 0x01}, /* qvga */ {TP6800_R12_SIF_ADDR_S, 0x21}, /* soi763a i2c addr */ {TP6800_R1A_SIF_TX_DATA2, 0x00}, }; static const struct cmd sensor_init[] = { {0x12, 0x48}, /* mirror - RGB */ {0x13, 0xa0}, /* clock - no AGC nor AEC */ {0x03, 0xa4}, /* saturation */ {0x04, 0x30}, /* hue */ {0x05, 0x88}, /* contrast */ {0x06, 0x60}, /* brightness */ {0x10, 0x41}, /* AEC */ {0x11, 0x40}, /* clock rate */ {0x13, 0xa0}, {0x14, 0x00}, /* 640x480 */ 
{0x15, 0x14}, {0x1f, 0x41}, {0x20, 0x80}, {0x23, 0xee}, {0x24, 0x50}, {0x25, 0x7a}, {0x26, 0x00}, {0x27, 0xe2}, {0x28, 0xb0}, {0x2a, 0x00}, {0x2b, 0x00}, {0x2d, 0x81}, {0x2f, 0x9d}, {0x60, 0x80}, {0x61, 0x00}, {0x62, 0x88}, {0x63, 0x11}, {0x64, 0x89}, {0x65, 0x00}, {0x67, 0x94}, {0x68, 0x7a}, {0x69, 0x0f}, {0x6c, 0x80}, {0x6d, 0x80}, {0x6e, 0x80}, {0x6f, 0xff}, {0x71, 0x20}, {0x74, 0x20}, {0x75, 0x86}, {0x77, 0xb5}, {0x17, 0x18}, /* H href start */ {0x18, 0xbf}, /* H href end */ {0x19, 0x03}, /* V start */ {0x1a, 0xf8}, /* V end */ {0x01, 0x80}, /* blue gain */ {0x02, 0x80}, /* red gain */ }; reg_w_buf(gspca_dev, reg_init, ARRAY_SIZE(reg_init)); i2c_w(gspca_dev, 0x12, 0x80); /* sensor reset */ msleep(10); i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init)); reg_w(gspca_dev, TP6800_R5C_EDGE_THRLD, 0x10); reg_w(gspca_dev, TP6800_R54_DARK_CFG, 0x00); setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness)); bulk_w(gspca_dev, 0x03, color_gain[SENSOR_SOI763A], ARRAY_SIZE(color_gain[0])); set_led(gspca_dev, 1); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); if (sd->sensor == SENSOR_SOI763A) setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); } static void soi763a_6810_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd bridge_init_2[] = { {TP6800_R7A_BLK_THRLD, 0x00}, {TP6800_R79_QUALITY, 0x04}, {TP6800_R79_QUALITY, 0x01}, }; static const struct cmd bridge_init_3[] = { {TP6800_R31_PIXEL_START, 0x20}, {TP6800_R32_PIXEL_END_L, 0x9f}, {TP6800_R33_PIXEL_END_H, 0x02}, {TP6800_R34_LINE_START, 0x13}, {TP6800_R35_LINE_END_L, 0xf8}, {TP6800_R36_LINE_END_H, 0x01}, }; static const struct cmd bridge_init_6[] = { {0x08, 0xff}, {0x09, 0xff}, {0x0a, 0x5f}, {0x0b, 0x80}, }; reg_w(gspca_dev, 0x22, gspca_dev->alt); bulk_w(gspca_dev, 0x03, color_null, sizeof color_null); reg_w(gspca_dev, 0x59, 0x40); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); reg_w_buf(gspca_dev, bridge_init_2, ARRAY_SIZE(bridge_init_2)); reg_w_buf(gspca_dev, tp6810_ov_init_common, ARRAY_SIZE(tp6810_ov_init_common)); reg_w_buf(gspca_dev, bridge_init_3, ARRAY_SIZE(bridge_init_3)); if (gspca_dev->curr_mode) { reg_w(gspca_dev, 0x4a, 0x7f); reg_w(gspca_dev, 0x07, 0x05); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ } else { reg_w(gspca_dev, 0x4a, 0xff); reg_w(gspca_dev, 0x07, 0x85); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */ } setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); reg_w_buf(gspca_dev, tp6810_bridge_start, ARRAY_SIZE(tp6810_bridge_start)); if (gspca_dev->curr_mode) { reg_w(gspca_dev, 0x4f, 0x00); reg_w(gspca_dev, 0x4e, 0x7c); } reg_w(gspca_dev, 0x00, 0x00); setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness)); bulk_w(gspca_dev, 0x03, color_gain[SENSOR_SOI763A], ARRAY_SIZE(color_gain[0])); set_led(gspca_dev, 1); reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0xf0); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), 
v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); reg_w_buf(gspca_dev, bridge_init_6, ARRAY_SIZE(bridge_init_6)); } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); set_dqt(gspca_dev, sd->quality); if (sd->bridge == BRIDGE_TP6800) { if (sd->sensor == SENSOR_CX0342) cx0342_6800_start(gspca_dev); else soi763a_6800_start(gspca_dev); } else { if (sd->sensor == SENSOR_CX0342) cx0342_6810_start(gspca_dev); else soi763a_6810_start(gspca_dev); reg_w_buf(gspca_dev, tp6810_late_start, ARRAY_SIZE(tp6810_late_start)); reg_w(gspca_dev, 0x80, 0x03); reg_w(gspca_dev, 0x82, gspca_dev->curr_mode ? 0x0a : 0x0e); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); if (sd->sensor == SENSOR_SOI763A) setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); if (sd->bridge == BRIDGE_TP6810) setautogain(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->autogain)); } setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->bridge == BRIDGE_TP6800) reg_w(gspca_dev, TP6800_R2F_TIMING_CFG, 0x03); set_led(gspca_dev, 0); reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; /* the start of frame contains: * ff d8 * ff fe * width / 16 * height / 8 * quality */ if (sd->bridge == BRIDGE_TP6810) { if (*data != 0x5a) { /*fixme: don't discard the whole frame..*/ if (*data == 0xaa || *data == 0x00) return; if (*data > 0xc0) { gspca_dbg(gspca_dev, D_FRAM, "bad frame\n"); gspca_dev->last_packet_type = DISCARD_PACKET; return; } } data++; len--; if (len < 2) { gspca_dev->last_packet_type = DISCARD_PACKET; return; } if (*data == 0xff && data[1] == 0xd8) { /*fixme: there may be information in the 4 high bits*/ if (len < 7) { gspca_dev->last_packet_type = DISCARD_PACKET; return; } if ((data[6] & 0x0f) != sd->quality) set_dqt(gspca_dev, data[6] & 0x0f); gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data + 7, len - 7); } else if (data[len - 2] == 0xff && data[len - 1] == 0xd9) { gspca_frame_add(gspca_dev, LAST_PACKET, data, len); } else { gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } return; } switch (*data) { case 0x55: gspca_frame_add(gspca_dev, LAST_PACKET, data, 0); if (len < 8 || data[1] != 0xff || data[2] != 0xd8 || data[3] != 0xff || data[4] != 0xfe) { /* Have only seen this with corrupt frames */ gspca_dev->last_packet_type = DISCARD_PACKET; return; } if (data[7] != sd->quality) set_dqt(gspca_dev, data[7]); gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data + 8, len - 8); break; case 0xaa: gspca_dev->last_packet_type = DISCARD_PACKET; break; case 0xcc: if (len >= 3 && (data[1] != 0xff || data[2] != 0xd8)) gspca_frame_add(gspca_dev, INTER_PACKET, data + 1, len - 1); else gspca_dev->last_packet_type = DISCARD_PACKET; break; } } static void sd_dq_callback(struct gspca_dev *gspca_dev) { struct sd 
*sd = (struct sd *) gspca_dev; int ret, alen; int luma, expo; if (sd->ag_cnt < 0) return; if (--sd->ag_cnt > 5) return; switch (sd->ag_cnt) { /* case 5: */ default: reg_w(gspca_dev, 0x7d, 0x00); break; case 4: reg_w(gspca_dev, 0x27, 0xb0); break; case 3: reg_w(gspca_dev, 0x0c, 0x01); break; case 2: ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x02), gspca_dev->usb_buf, 32, &alen, 500); if (ret < 0) { pr_err("bulk err %d\n", ret); break; } /* values not used (unknown) */ break; case 1: reg_w(gspca_dev, 0x27, 0xd0); break; case 0: ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x02), gspca_dev->usb_buf, 32, &alen, 500); if (ret < 0) { pr_err("bulk err %d\n", ret); break; } luma = ((gspca_dev->usb_buf[8] << 8) + gspca_dev->usb_buf[7] + (gspca_dev->usb_buf[11] << 8) + gspca_dev->usb_buf[10] + (gspca_dev->usb_buf[14] << 8) + gspca_dev->usb_buf[13] + (gspca_dev->usb_buf[17] << 8) + gspca_dev->usb_buf[16] + (gspca_dev->usb_buf[20] << 8) + gspca_dev->usb_buf[19] + (gspca_dev->usb_buf[23] << 8) + gspca_dev->usb_buf[22] + (gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] + (gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28]) / 8; if (gspca_dev->pixfmt.width == 640) luma /= 4; reg_w(gspca_dev, 0x7d, 0x00); expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure); ret = gspca_expo_autogain(gspca_dev, luma, 60, /* desired luma */ 6, /* dead zone */ 2, /* gain knee */ 70); /* expo knee */ sd->ag_cnt = AG_CNT_START; if (sd->bridge == BRIDGE_TP6810) { int new_expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure); if ((expo >= 128 && new_expo < 128) || (expo < 128 && new_expo >= 128)) setframerate(gspca_dev, new_expo); } break; } } /* get stream parameters (framerate) */ static void sd_get_streamparm(struct gspca_dev *gspca_dev, struct v4l2_streamparm *parm) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_captureparm *cp = &parm->parm.capture; struct v4l2_fract *tpf = &cp->timeperframe; int fr, i; tpf->numerator = 1; i = get_fr_idx(gspca_dev); if (i & 0x80) { if (sd->bridge == BRIDGE_TP6800) fr = rates[6 - (i & 0x07)]; else fr = rates_6810[7 - (i & 0x07)]; } else { fr = rates[6 - i]; } tpf->denominator = fr; } /* set stream parameters (framerate) */ static void sd_set_streamparm(struct gspca_dev *gspca_dev, struct v4l2_streamparm *parm) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_captureparm *cp = &parm->parm.capture; struct v4l2_fract *tpf = &cp->timeperframe; int fr, i; if (tpf->numerator == 0 || tpf->denominator == 0) sd->framerate = DEFAULT_FRAME_RATE; else sd->framerate = tpf->denominator / tpf->numerator; if (gspca_dev->streaming) setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); /* Return the actual framerate */ i = get_fr_idx(gspca_dev); if (i & 0x80) fr = rates_6810[7 - (i & 0x07)]; else fr = rates[6 - i]; tpf->numerator = 1; tpf->denominator = fr; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, const struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor != SENSOR_SOI763A) return -ENOTTY; v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality); return 0; } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor != SENSOR_SOI763A) return -ENOTTY; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual); jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, 
struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_SHARPNESS: setsharpness(gspca_dev, ctrl->val); break; case V4L2_CID_GAMMA: setgamma(gspca_dev, ctrl->val); break; case V4L2_CID_BLUE_BALANCE: setbgain(gspca_dev, ctrl->val); break; case V4L2_CID_RED_BALANCE: setrgain(gspca_dev, ctrl->val); break; case V4L2_CID_EXPOSURE: sd_setgain(gspca_dev); break; case V4L2_CID_AUTOGAIN: if (ctrl->val) break; sd_setgain(gspca_dev); break; case V4L2_CID_JPEG_COMPRESSION_QUALITY: jpeg_set_qual(sd->jpeg_hdr, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 1, 0xdc, 1, 0x4e); if (sd->sensor == SENSOR_CX0342) { sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 4095, 1, 256); sd->blue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 4095, 1, 256); } if (sd->sensor == SENSOR_SOI763A) gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 15, 1, 3); else gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 4095, 1, 256); sd->sharpness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 3, 1, 2); sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAMMA, 0, NGAMMA - 1, 1, (sd->sensor == SENSOR_SOI763A && sd->bridge == BRIDGE_TP6800) ? 0 : 1); if (sd->bridge == BRIDGE_TP6810) gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); if (sd->sensor == SENSOR_SOI763A) sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 0, 15, 1, (sd->bridge == BRIDGE_TP6810) ? 0 : 13); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } if (gspca_dev->autogain) v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false); else v4l2_ctrl_cluster(2, &gspca_dev->exposure); return 0; } static const struct sd_desc sd_desc = { .name = KBUILD_MODNAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = sd_dq_callback, .get_streamparm = sd_get_streamparm, .set_streamparm = sd_set_streamparm, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, }; static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06a2, 0x0003), .driver_info = BRIDGE_TP6800}, {USB_DEVICE(0x06a2, 0x6810), .driver_info = BRIDGE_TP6810}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, device_table); static int sd_probe(struct usb_interface *interface, const struct usb_device_id *id) { return gspca_dev_probe(interface, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = KBUILD_MODNAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); module_param(force_sensor, int, 0644); MODULE_PARM_DESC(force_sensor, "Force sensor. 0: cx0342, 1: soi763a");
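The sd_pkt_scan() handler above keys off the TP6800 start-of-frame layout described in its comment: a 0x55 marker, then ff d8, ff fe, width/16, height/8 and the current JPEG quality index. As a reading aid, here is a minimal stand-alone sketch of that header check; the helper name and out-parameters are hypothetical and not part of the driver:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: validate a TP6800 start-of-frame packet and
 * recover the advertised geometry and JPEG quality index.
 * Layout (per the driver comment): 0x55, ff d8, ff fe, w/16, h/8, quality.
 */
static bool tp6800_parse_sof(const uint8_t *data, size_t len,
			     unsigned int *width, unsigned int *height,
			     unsigned int *quality)
{
	if (len < 8)
		return false;
	if (data[0] != 0x55 || data[1] != 0xff || data[2] != 0xd8 ||
	    data[3] != 0xff || data[4] != 0xfe)
		return false;
	*width = data[5] * 16;	/* stored as width / 16 */
	*height = data[6] * 8;	/* stored as height / 8 */
	*quality = data[7];	/* index into the DQT table set */
	return true;
}

On a match the driver prepends its own JPEG header (sd->jpeg_hdr) and adds the payload from offset 8 onward to the frame, which is exactly what the 0x55 case in sd_pkt_scan() does.
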
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Pablo Neira Ayuso <pablo@netfilter.org>
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_meta.h>
#include <net/netfilter/nf_tables_offload.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/gre.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

struct nft_inner_tun_ctx_locked {
	struct nft_inner_tun_ctx ctx;
	local_lock_t bh_lock;
};

static DEFINE_PER_CPU(struct nft_inner_tun_ctx_locked, nft_pcpu_tun_ctx) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

/* Same layout as nft_expr but it embeds the private expression data area.
 */
struct __nft_expr {
	const struct nft_expr_ops *ops;
	union {
		struct nft_payload payload;
		struct nft_meta meta;
	} __attribute__((aligned(__alignof__(u64))));
};

enum {
	NFT_INNER_EXPR_PAYLOAD,
	NFT_INNER_EXPR_META,
};

struct nft_inner {
	u8 flags;
	u8 hdrsize;
	u8 type;
	u8 expr_type;
	struct __nft_expr expr;
};

static int nft_inner_parse_l2l3(const struct nft_inner *priv,
				const struct nft_pktinfo *pkt,
				struct nft_inner_tun_ctx *ctx, u32 off)
{
	__be16 llproto, outer_llproto;
	u32 nhoff, thoff;

	if (priv->flags & NFT_INNER_LL) {
		struct vlan_ethhdr *veth, _veth;
		struct ethhdr *eth, _eth;
		u32 hdrsize;

		eth = skb_header_pointer(pkt->skb, off, sizeof(_eth), &_eth);
		if (!eth)
			return -1;

		switch (eth->h_proto) {
		case htons(ETH_P_IP):
		case htons(ETH_P_IPV6):
			llproto = eth->h_proto;
			hdrsize = sizeof(_eth);
			break;
		case htons(ETH_P_8021Q):
			veth = skb_header_pointer(pkt->skb, off, sizeof(_veth), &_veth);
			if (!veth)
				return -1;

			outer_llproto = veth->h_vlan_encapsulated_proto;
			llproto = veth->h_vlan_proto;
			hdrsize = sizeof(_veth);
			break;
		default:
			return -1;
		}

		ctx->inner_lloff = off;
		ctx->flags |= NFT_PAYLOAD_CTX_INNER_LL;
		off += hdrsize;
	} else {
		struct iphdr *iph;
		u32 _version;

		iph = skb_header_pointer(pkt->skb, off, sizeof(_version), &_version);
		if (!iph)
			return -1;

		switch (iph->version) {
		case 4:
			llproto = htons(ETH_P_IP);
			break;
		case 6:
			llproto = htons(ETH_P_IPV6);
			break;
		default:
			return -1;
		}
	}

	ctx->llproto = llproto;
	if (llproto == htons(ETH_P_8021Q))
		llproto = outer_llproto;

	nhoff = off;

	switch (llproto) {
	case htons(ETH_P_IP): {
		struct iphdr *iph, _iph;

		iph = skb_header_pointer(pkt->skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return -1;

		if (iph->ihl < 5 || iph->version != 4)
			return -1;

		ctx->inner_nhoff = nhoff;
		ctx->flags |= NFT_PAYLOAD_CTX_INNER_NH;

		thoff = nhoff + (iph->ihl * 4);
		if ((ntohs(iph->frag_off) & IP_OFFSET) == 0) {
			ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
			ctx->inner_thoff = thoff;
			ctx->l4proto = iph->protocol;
		}
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h, _ip6h;
		int fh_flags = IP6_FH_F_AUTH;
		unsigned short fragoff;
		int l4proto;

		ip6h = skb_header_pointer(pkt->skb, nhoff, sizeof(_ip6h), &_ip6h);
		if (!ip6h)
			return -1;

		if (ip6h->version != 6)
			return -1;

		ctx->inner_nhoff = nhoff;
		ctx->flags |= NFT_PAYLOAD_CTX_INNER_NH;

		thoff = nhoff;
		l4proto = ipv6_find_hdr(pkt->skb, &thoff, -1, &fragoff, &fh_flags);
		if (l4proto < 0 || thoff > U16_MAX)
			return -1;

		if (fragoff == 0) {
			thoff = nhoff + sizeof(_ip6h);
			ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
			ctx->inner_thoff = thoff;
			ctx->l4proto = l4proto;
		}
		break;
	}
	default:
		return -1;
	}

	return 0;
}

static int nft_inner_parse_tunhdr(const struct nft_inner *priv,
				  const struct nft_pktinfo *pkt,
				  struct nft_inner_tun_ctx *ctx, u32 *off)
{
	if (pkt->tprot == IPPROTO_GRE) {
		ctx->inner_tunoff = pkt->thoff;
		ctx->flags |= NFT_PAYLOAD_CTX_INNER_TUN;
		return 0;
	}

	if (pkt->tprot != IPPROTO_UDP)
		return -1;

	ctx->inner_tunoff = *off;
	ctx->flags |= NFT_PAYLOAD_CTX_INNER_TUN;
	*off += priv->hdrsize;

	switch (priv->type) {
	case NFT_INNER_GENEVE: {
		struct genevehdr *gnvh, _gnvh;

		gnvh = skb_header_pointer(pkt->skb, pkt->inneroff,
					  sizeof(_gnvh), &_gnvh);
		if (!gnvh)
			return -1;

		*off += gnvh->opt_len * 4;
		break;
	}
	default:
		break;
	}

	return 0;
}

static int nft_inner_parse(const struct nft_inner *priv,
			   struct nft_pktinfo *pkt,
			   struct nft_inner_tun_ctx *tun_ctx)
{
	u32 off = pkt->inneroff;

	if (priv->flags & NFT_INNER_HDRSIZE &&
	    nft_inner_parse_tunhdr(priv, pkt, tun_ctx, &off) < 0)
		return -1;

	if (priv->flags & (NFT_INNER_LL | NFT_INNER_NH)) {
		if (nft_inner_parse_l2l3(priv, pkt, tun_ctx,
					 off) < 0)
			return -1;
	} else if (priv->flags & NFT_INNER_TH) {
		tun_ctx->inner_thoff = off;
		tun_ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
	}

	tun_ctx->type = priv->type;
	tun_ctx->cookie = (unsigned long)pkt->skb;
	pkt->flags |= NFT_PKTINFO_INNER_FULL;

	return 0;
}

static bool nft_inner_restore_tun_ctx(const struct nft_pktinfo *pkt,
				      struct nft_inner_tun_ctx *tun_ctx)
{
	struct nft_inner_tun_ctx *this_cpu_tun_ctx;

	local_bh_disable();
	local_lock_nested_bh(&nft_pcpu_tun_ctx.bh_lock);
	this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx.ctx);
	if (this_cpu_tun_ctx->cookie != (unsigned long)pkt->skb) {
		local_unlock_nested_bh(&nft_pcpu_tun_ctx.bh_lock);
		local_bh_enable();
		return false;
	}
	*tun_ctx = *this_cpu_tun_ctx;
	local_unlock_nested_bh(&nft_pcpu_tun_ctx.bh_lock);
	local_bh_enable();

	return true;
}

static void nft_inner_save_tun_ctx(const struct nft_pktinfo *pkt,
				   const struct nft_inner_tun_ctx *tun_ctx)
{
	struct nft_inner_tun_ctx *this_cpu_tun_ctx;

	local_bh_disable();
	local_lock_nested_bh(&nft_pcpu_tun_ctx.bh_lock);
	this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx.ctx);
	if (this_cpu_tun_ctx->cookie != tun_ctx->cookie)
		*this_cpu_tun_ctx = *tun_ctx;
	local_unlock_nested_bh(&nft_pcpu_tun_ctx.bh_lock);
	local_bh_enable();
}

static bool nft_inner_parse_needed(const struct nft_inner *priv,
				   const struct nft_pktinfo *pkt,
				   struct nft_inner_tun_ctx *tun_ctx)
{
	if (!(pkt->flags & NFT_PKTINFO_INNER_FULL))
		return true;

	if (!nft_inner_restore_tun_ctx(pkt, tun_ctx))
		return true;

	if (priv->type != tun_ctx->type)
		return true;

	return false;
}

static void nft_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
			   const struct nft_pktinfo *pkt)
{
	const struct nft_inner *priv = nft_expr_priv(expr);
	struct nft_inner_tun_ctx tun_ctx = {};

	if (nft_payload_inner_offset(pkt) < 0)
		goto err;

	if (nft_inner_parse_needed(priv, pkt, &tun_ctx) &&
	    nft_inner_parse(priv, (struct nft_pktinfo *)pkt, &tun_ctx) < 0)
		goto err;

	switch (priv->expr_type) {
	case NFT_INNER_EXPR_PAYLOAD:
		nft_payload_inner_eval((struct nft_expr *)&priv->expr,
				       regs, pkt, &tun_ctx);
		break;
	case NFT_INNER_EXPR_META:
		nft_meta_inner_eval((struct nft_expr *)&priv->expr,
				    regs, pkt, &tun_ctx);
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	nft_inner_save_tun_ctx(pkt, &tun_ctx);

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_inner_policy[NFTA_INNER_MAX + 1] = {
	[NFTA_INNER_NUM]	= { .type = NLA_U32 },
	[NFTA_INNER_FLAGS]	= { .type = NLA_U32 },
	[NFTA_INNER_HDRSIZE]	= { .type = NLA_U32 },
	[NFTA_INNER_TYPE]	= { .type = NLA_U32 },
	[NFTA_INNER_EXPR]	= { .type = NLA_NESTED },
};

struct nft_expr_info {
	const struct nft_expr_ops *ops;
	const struct nlattr *attr;
	struct nlattr *tb[NFT_EXPR_MAXATTR + 1];
};

static int nft_inner_init(const struct nft_ctx *ctx,
			  const struct nft_expr *expr,
			  const struct nlattr * const tb[])
{
	struct nft_inner *priv = nft_expr_priv(expr);
	u32 flags, hdrsize, type, num;
	struct nft_expr_info expr_info;
	int err;

	if (!tb[NFTA_INNER_FLAGS] ||
	    !tb[NFTA_INNER_NUM] ||
	    !tb[NFTA_INNER_HDRSIZE] ||
	    !tb[NFTA_INNER_TYPE] ||
	    !tb[NFTA_INNER_EXPR])
		return -EINVAL;

	flags = ntohl(nla_get_be32(tb[NFTA_INNER_FLAGS]));
	if (flags & ~NFT_INNER_MASK)
		return -EOPNOTSUPP;

	num = ntohl(nla_get_be32(tb[NFTA_INNER_NUM]));
	if (num != 0)
		return -EOPNOTSUPP;

	hdrsize = ntohl(nla_get_be32(tb[NFTA_INNER_HDRSIZE]));
	type = ntohl(nla_get_be32(tb[NFTA_INNER_TYPE]));
	if (type > U8_MAX)
		return -EINVAL;

	if (flags & NFT_INNER_HDRSIZE) {
		if (hdrsize == 0 || hdrsize > 64)
			return -EOPNOTSUPP;
	}

	priv->flags = flags;
	priv->hdrsize = hdrsize;
	priv->type = type;

	err = nft_expr_inner_parse(ctx, tb[NFTA_INNER_EXPR], &expr_info);
	if (err < 0)
		return err;

	priv->expr.ops = expr_info.ops;

	if (!strcmp(expr_info.ops->type->name, "payload"))
		priv->expr_type = NFT_INNER_EXPR_PAYLOAD;
	else if (!strcmp(expr_info.ops->type->name, "meta"))
		priv->expr_type = NFT_INNER_EXPR_META;
	else
		return -EINVAL;

	err = expr_info.ops->init(ctx, (struct nft_expr *)&priv->expr,
				  (const struct nlattr * const*)expr_info.tb);
	if (err < 0)
		return err;

	return 0;
}

static int nft_inner_dump(struct sk_buff *skb, const struct nft_expr *expr,
			  bool reset)
{
	const struct nft_inner *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_INNER_NUM, htonl(0)) ||
	    nla_put_be32(skb, NFTA_INNER_TYPE, htonl(priv->type)) ||
	    nla_put_be32(skb, NFTA_INNER_FLAGS, htonl(priv->flags)) ||
	    nla_put_be32(skb, NFTA_INNER_HDRSIZE, htonl(priv->hdrsize)))
		goto nla_put_failure;

	if (nft_expr_dump(skb, NFTA_INNER_EXPR,
			  (struct nft_expr *)&priv->expr, reset) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_inner_ops = {
	.type		= &nft_inner_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_inner)),
	.eval		= nft_inner_eval,
	.init		= nft_inner_init,
	.dump		= nft_inner_dump,
};

struct nft_expr_type nft_inner_type __read_mostly = {
	.name		= "inner",
	.ops		= &nft_inner_ops,
	.policy		= nft_inner_policy,
	.maxattr	= NFTA_INNER_MAX,
	.owner		= THIS_MODULE,
};
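nft_inner_eval() avoids re-parsing the inner headers when several inner expressions run on the same packet: the parsed offsets are stashed in a per-CPU slot and keyed on the skb pointer, which acts as a cookie identifying the owning packet. Below is a condensed sketch of that cookie-validated cache pattern, using a plain struct and hypothetical names in place of the kernel's per-CPU and local-lock machinery:

#include <stdbool.h>

/* Hypothetical stand-in for struct nft_inner_tun_ctx. */
struct tun_cache_entry {
	unsigned long cookie;	/* identity of the packet that filled the slot */
	unsigned int flags;
	unsigned int inner_thoff;
};

static struct tun_cache_entry slot;	/* one instance per CPU in the real code */

/* Restore succeeds only while the slot still describes this packet. */
static bool cache_restore(unsigned long pkt_cookie, struct tun_cache_entry *out)
{
	if (slot.cookie != pkt_cookie)
		return false;	/* stale: another packet owns the slot */
	*out = slot;
	return true;
}

/* Publish a freshly parsed context, overwriting any stale owner. */
static void cache_save(const struct tun_cache_entry *in)
{
	if (slot.cookie != in->cookie)
		slot = *in;
}

A failed restore simply forces a re-parse (nft_inner_parse_needed() returns true), so a stale or overwritten slot costs time but never correctness.
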
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Force feedback support for SmartJoy PLUS PS2->USB adapter
 *
 *  Copyright (c) 2009 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 *  Based on hid-pl.c and hid-gaff.c
 *   Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
 *   Copyright (c) 2008 Lukasz Lubojanski <lukasz@lubojanski.info>
 */

/*
 */

/* #define DEBUG */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"

#ifdef CONFIG_SMARTJOYPLUS_FF

struct sjoyff_device {
	struct hid_report *report;
};

static int hid_sjoyff_play(struct input_dev *dev, void *data,
			   struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct sjoyff_device *sjoyff = data;
	u32 left, right;

	left = effect->u.rumble.strong_magnitude;
	right = effect->u.rumble.weak_magnitude;
	dev_dbg(&dev->dev, "called with 0x%08x 0x%08x\n", left, right);

	left = left * 0xff / 0xffff;
	right = (right != 0); /* on/off only */

	sjoyff->report->field[0]->value[1] = right;
	sjoyff->report->field[0]->value[2] = left;
	dev_dbg(&dev->dev, "running with 0x%02x 0x%02x\n", left, right);
	hid_hw_request(hid, sjoyff->report, HID_REQ_SET_REPORT);

	return 0;
}

static int sjoyff_init(struct hid_device *hid)
{
	struct sjoyff_device *sjoyff;
	struct hid_report *report;
	struct hid_input *hidinput;
	struct list_head *report_list =
			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct list_head *report_ptr = report_list;
	struct input_dev *dev;
	int error;

	if (list_empty(report_list)) {
		hid_err(hid, "no output reports found\n");
		return -ENODEV;
	}

	list_for_each_entry(hidinput, &hid->inputs, list) {
		report_ptr = report_ptr->next;

		if (report_ptr == report_list) {
			hid_err(hid, "required output report is missing\n");
			return -ENODEV;
		}

		report = list_entry(report_ptr, struct hid_report, list);
		if (report->maxfield < 1) {
			hid_err(hid, "no fields in the report\n");
			return -ENODEV;
		}

		if (report->field[0]->report_count < 3) {
			hid_err(hid, "not enough values in the field\n");
			return -ENODEV;
		}

		sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL);
		if (!sjoyff)
			return -ENOMEM;

		dev = hidinput->input;

		set_bit(FF_RUMBLE, dev->ffbit);

		error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play);
		if (error) {
			kfree(sjoyff);
			return error;
		}

		sjoyff->report = report;
		sjoyff->report->field[0]->value[0] = 0x01;
		sjoyff->report->field[0]->value[1] = 0x00;
		sjoyff->report->field[0]->value[2] = 0x00;
		hid_hw_request(hid, sjoyff->report, HID_REQ_SET_REPORT);
	}

	hid_info(hid, "Force feedback for SmartJoy PLUS PS2/USB adapter\n");

	return 0;
}
#else
static inline int sjoyff_init(struct hid_device *hid)
{
	return 0;
}
#endif

static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	hdev->quirks |= id->driver_data;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err;
	}

	sjoyff_init(hdev);

	return 0;
err:
	return ret;
}

static const struct hid_device_id sjoy_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO),
		.driver_data = HID_QUIRK_NOGET },
	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO),
		.driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET |
			       HID_QUIRK_SKIP_OUTPUT_REPORTS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO),
		.driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET |
			       HID_QUIRK_SKIP_OUTPUT_REPORTS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD),
		.driver_data = HID_QUIRK_MULTI_INPUT |
			       HID_QUIRK_SKIP_OUTPUT_REPORTS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII),
		.driver_data = HID_QUIRK_MULTI_INPUT |
			       HID_QUIRK_SKIP_OUTPUT_REPORTS },
	{ }
};
MODULE_DEVICE_TABLE(hid, sjoy_devices);

static struct hid_driver sjoy_driver = {
	.name = "smartjoyplus",
	.id_table = sjoy_devices,
	.probe = sjoy_probe,
};
module_hid_driver(sjoy_driver);

MODULE_DESCRIPTION("Force feedback support for SmartJoy PLUS PS2->USB adapter");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jussi Kivilinna");
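hid_sjoyff_play() has to squeeze the two 16-bit ff_effect magnitudes into what the adapter's output report accepts: an 8-bit level for the strong motor and a single on/off bit for the weak one. A minimal sketch of that mapping, with a hypothetical helper name and the same 0..0xffff input range the driver assumes:

#include <stdint.h>

/* Hypothetical helper mirroring the driver's magnitude mapping:
 * the strong motor gets a scaled 8-bit level, the weak motor is
 * reduced to on/off.
 */
static void sjoy_scale_magnitudes(uint16_t strong, uint16_t weak,
				  uint8_t *left, uint8_t *right)
{
	*left = (uint8_t)((uint32_t)strong * 0xff / 0xffff);	/* 0..255 */
	*right = weak ? 1 : 0;					/* on/off only */
}

The scaled values land in value[2] and value[1] of the first output field, with value[0] held at 0x01, matching the report layout that sjoyff_init() programs.
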
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* raw.c - Raw sockets for protocol family CAN * * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE.
* */ #include <linux/module.h> #include <linux/init.h> #include <linux/uio.h> #include <linux/net.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/can.h> #include <linux/can/core.h> #include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */ #include <linux/can/skb.h> #include <linux/can/raw.h> #include <net/sock.h> #include <net/net_namespace.h> MODULE_DESCRIPTION("PF_CAN raw protocol"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>"); MODULE_ALIAS("can-proto-1"); #define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex) #define MASK_ALL 0 /* A raw socket has a list of can_filters attached to it, each receiving * the CAN frames matching that filter. If the filter list is empty, * no CAN frames will be received by the socket. The default after * opening the socket, is to have one filter which receives all frames. * The filter list is allocated dynamically with the exception of the * list containing only one item. This common case is optimized by * storing the single filter in dfilter, to avoid using dynamic memory. */ struct uniqframe { const struct sk_buff *skb; int skbcnt; unsigned int join_rx_count; }; struct raw_sock { struct sock sk; struct net_device *dev; netdevice_tracker dev_tracker; struct list_head notifier; int ifindex; unsigned int bound:1; unsigned int loopback:1; unsigned int recv_own_msgs:1; unsigned int fd_frames:1; unsigned int xl_frames:1; unsigned int join_filters:1; struct can_raw_vcid_options raw_vcid_opts; canid_t tx_vcid_shifted; canid_t rx_vcid_shifted; canid_t rx_vcid_mask_shifted; can_err_mask_t err_mask; int count; /* number of active filters */ struct can_filter dfilter; /* default/single filter */ struct can_filter *filter; /* pointer to filter(s) */ struct uniqframe __percpu *uniq; }; static LIST_HEAD(raw_notifier_list); static DEFINE_SPINLOCK(raw_notifier_lock); static struct raw_sock *raw_busy_notifier; /* Return pointer to store the extra msg flags for raw_recvmsg(). * We use the space of one unsigned int beyond the 'struct sockaddr_can' * in skb->cb. 
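 * raw_rcv() stores MSG_DONTROUTE and MSG_CONFIRM in that extra word;
 * raw_recvmsg() later ORs it into msg->msg_flags.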
*/ static inline unsigned int *raw_flags(struct sk_buff *skb) { sock_skb_cb_check_size(sizeof(struct sockaddr_can) + sizeof(unsigned int)); /* return pointer after struct sockaddr_can */ return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]); } static inline struct raw_sock *raw_sk(const struct sock *sk) { return (struct raw_sock *)sk; } static void raw_rcv(struct sk_buff *oskb, void *data) { struct sock *sk = (struct sock *)data; struct raw_sock *ro = raw_sk(sk); enum skb_drop_reason reason; struct sockaddr_can *addr; struct sk_buff *skb; unsigned int *pflags; /* check the received tx sock reference */ if (!ro->recv_own_msgs && oskb->sk == sk) return; /* make sure to not pass oversized frames to the socket */ if (!ro->fd_frames && can_is_canfd_skb(oskb)) return; if (can_is_canxl_skb(oskb)) { struct canxl_frame *cxl = (struct canxl_frame *)oskb->data; /* make sure to not pass oversized frames to the socket */ if (!ro->xl_frames) return; /* filter CAN XL VCID content */ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_RX_FILTER) { /* apply VCID filter if user enabled the filter */ if ((cxl->prio & ro->rx_vcid_mask_shifted) != (ro->rx_vcid_shifted & ro->rx_vcid_mask_shifted)) return; } else { /* no filter => do not forward VCID tagged frames */ if (cxl->prio & CANXL_VCID_MASK) return; } } /* eliminate multiple filter matches for the same skb */ if (this_cpu_ptr(ro->uniq)->skb == oskb && this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) { if (!ro->join_filters) return; this_cpu_inc(ro->uniq->join_rx_count); /* drop frame until all enabled filters matched */ if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count) return; } else { this_cpu_ptr(ro->uniq)->skb = oskb; this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt; this_cpu_ptr(ro->uniq)->join_rx_count = 1; /* drop first frame to check all enabled filters? */ if (ro->join_filters && ro->count > 1) return; } /* clone the given skb to be able to enqueue it into the rcv queue */ skb = skb_clone(oskb, GFP_ATOMIC); if (!skb) return; /* Put the datagram to the queue so that raw_recvmsg() can get * it from there. We need to pass the interface index to * raw_recvmsg(). We pass a whole struct sockaddr_can in * skb->cb containing the interface index. 
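 * sock_skb_cb_check_size() below makes this a compile-time guarantee;
 * raw_flags() performs the same check for the extra flags word.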
*/ sock_skb_cb_check_size(sizeof(struct sockaddr_can)); addr = (struct sockaddr_can *)skb->cb; memset(addr, 0, sizeof(*addr)); addr->can_family = AF_CAN; addr->can_ifindex = skb->dev->ifindex; /* add CAN specific message flags for raw_recvmsg() */ pflags = raw_flags(skb); *pflags = 0; if (oskb->sk) *pflags |= MSG_DONTROUTE; if (oskb->sk == sk) *pflags |= MSG_CONFIRM; if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) sk_skb_reason_drop(sk, skb, reason); } static int raw_enable_filters(struct net *net, struct net_device *dev, struct sock *sk, struct can_filter *filter, int count) { int err = 0; int i; for (i = 0; i < count; i++) { err = can_rx_register(net, dev, filter[i].can_id, filter[i].can_mask, raw_rcv, sk, "raw", sk); if (err) { /* clean up successfully registered filters */ while (--i >= 0) can_rx_unregister(net, dev, filter[i].can_id, filter[i].can_mask, raw_rcv, sk); break; } } return err; } static int raw_enable_errfilter(struct net *net, struct net_device *dev, struct sock *sk, can_err_mask_t err_mask) { int err = 0; if (err_mask) err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG, raw_rcv, sk, "raw", sk); return err; } static void raw_disable_filters(struct net *net, struct net_device *dev, struct sock *sk, struct can_filter *filter, int count) { int i; for (i = 0; i < count; i++) can_rx_unregister(net, dev, filter[i].can_id, filter[i].can_mask, raw_rcv, sk); } static inline void raw_disable_errfilter(struct net *net, struct net_device *dev, struct sock *sk, can_err_mask_t err_mask) { if (err_mask) can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG, raw_rcv, sk); } static inline void raw_disable_allfilters(struct net *net, struct net_device *dev, struct sock *sk) { struct raw_sock *ro = raw_sk(sk); raw_disable_filters(net, dev, sk, ro->filter, ro->count); raw_disable_errfilter(net, dev, sk, ro->err_mask); } static int raw_enable_allfilters(struct net *net, struct net_device *dev, struct sock *sk) { struct raw_sock *ro = raw_sk(sk); int err; err = raw_enable_filters(net, dev, sk, ro->filter, ro->count); if (!err) { err = raw_enable_errfilter(net, dev, sk, ro->err_mask); if (err) raw_disable_filters(net, dev, sk, ro->filter, ro->count); } return err; } static void raw_notify(struct raw_sock *ro, unsigned long msg, struct net_device *dev) { struct sock *sk = &ro->sk; if (!net_eq(dev_net(dev), sock_net(sk))) return; if (ro->dev != dev) return; switch (msg) { case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ if (ro->bound) { raw_disable_allfilters(dev_net(dev), dev, sk); netdev_put(dev, &ro->dev_tracker); } if (ro->count > 1) kfree(ro->filter); ro->ifindex = 0; ro->bound = 0; ro->dev = NULL; ro->count = 0; release_sock(sk); sk->sk_err = ENODEV; if (!sock_flag(sk, SOCK_DEAD)) sk_error_report(sk); break; case NETDEV_DOWN: sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk_error_report(sk); break; } } static int raw_notifier(struct notifier_block *nb, unsigned long msg, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev->type != ARPHRD_CAN) return NOTIFY_DONE; if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN) return NOTIFY_DONE; if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. 
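 * raw_notify() below is called with raw_notifier_lock temporarily
 * dropped, so a nested notifier call must not re-enter the list walk.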
*/ return NOTIFY_DONE; spin_lock(&raw_notifier_lock); list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) { spin_unlock(&raw_notifier_lock); raw_notify(raw_busy_notifier, msg, dev); spin_lock(&raw_notifier_lock); } raw_busy_notifier = NULL; spin_unlock(&raw_notifier_lock); return NOTIFY_DONE; } static int raw_init(struct sock *sk) { struct raw_sock *ro = raw_sk(sk); ro->bound = 0; ro->ifindex = 0; ro->dev = NULL; /* set default filter to single entry dfilter */ ro->dfilter.can_id = 0; ro->dfilter.can_mask = MASK_ALL; ro->filter = &ro->dfilter; ro->count = 1; /* set default loopback behaviour */ ro->loopback = 1; ro->recv_own_msgs = 0; ro->fd_frames = 0; ro->xl_frames = 0; ro->join_filters = 0; /* alloc_percpu provides zero'ed memory */ ro->uniq = alloc_percpu(struct uniqframe); if (unlikely(!ro->uniq)) return -ENOMEM; /* set notifier */ spin_lock(&raw_notifier_lock); list_add_tail(&ro->notifier, &raw_notifier_list); spin_unlock(&raw_notifier_lock); return 0; } static int raw_release(struct socket *sock) { struct sock *sk = sock->sk; struct raw_sock *ro; struct net *net; if (!sk) return 0; ro = raw_sk(sk); net = sock_net(sk); spin_lock(&raw_notifier_lock); while (raw_busy_notifier == ro) { spin_unlock(&raw_notifier_lock); schedule_timeout_uninterruptible(1); spin_lock(&raw_notifier_lock); } list_del(&ro->notifier); spin_unlock(&raw_notifier_lock); rtnl_lock(); lock_sock(sk); /* remove current filters & unregister */ if (ro->bound) { if (ro->dev) { raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); netdev_put(ro->dev, &ro->dev_tracker); } else { raw_disable_allfilters(net, NULL, sk); } } if (ro->count > 1) kfree(ro->filter); ro->ifindex = 0; ro->bound = 0; ro->dev = NULL; ro->count = 0; free_percpu(ro->uniq); sock_orphan(sk); sock->sk = NULL; release_sock(sk); rtnl_unlock(); sock_prot_inuse_add(net, sk->sk_prot, -1); sock_put(sk); return 0; } static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) { struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); struct net_device *dev = NULL; int ifindex; int err = 0; int notify_enetdown = 0; if (len < RAW_MIN_NAMELEN) return -EINVAL; if (addr->can_family != AF_CAN) return -EINVAL; rtnl_lock(); lock_sock(sk); if (ro->bound && addr->can_ifindex == ro->ifindex) goto out; if (addr->can_ifindex) { dev = dev_get_by_index(sock_net(sk), addr->can_ifindex); if (!dev) { err = -ENODEV; goto out; } if (dev->type != ARPHRD_CAN) { err = -ENODEV; goto out_put_dev; } if (!(dev->flags & IFF_UP)) notify_enetdown = 1; ifindex = dev->ifindex; /* filters set by default/setsockopt */ err = raw_enable_allfilters(sock_net(sk), dev, sk); if (err) goto out_put_dev; } else { ifindex = 0; /* filters set by default/setsockopt */ err = raw_enable_allfilters(sock_net(sk), NULL, sk); } if (!err) { if (ro->bound) { /* unregister old filters */ if (ro->dev) { raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); /* drop reference to old ro->dev */ netdev_put(ro->dev, &ro->dev_tracker); } else { raw_disable_allfilters(sock_net(sk), NULL, sk); } } ro->ifindex = ifindex; ro->bound = 1; /* bind() ok -> hold a reference for new ro->dev */ ro->dev = dev; if (ro->dev) netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL); } out_put_dev: /* remove potential reference from dev_get_by_index() */ dev_put(dev); out: release_sock(sk); rtnl_unlock(); if (notify_enetdown) { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk_error_report(sk); } return err; } static int raw_getname(struct 
socket *sock, struct sockaddr *uaddr, int peer) { struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); if (peer) return -EOPNOTSUPP; memset(addr, 0, RAW_MIN_NAMELEN); addr->can_family = AF_CAN; addr->can_ifindex = ro->ifindex; return RAW_MIN_NAMELEN; } static int raw_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); struct can_filter *filter = NULL; /* dyn. alloc'ed filters */ struct can_filter sfilter; /* single filter */ struct net_device *dev = NULL; can_err_mask_t err_mask = 0; int count = 0; int flag; int err = 0; if (level != SOL_CAN_RAW) return -EINVAL; switch (optname) { case CAN_RAW_FILTER: if (optlen % sizeof(struct can_filter) != 0) return -EINVAL; if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter)) return -EINVAL; count = optlen / sizeof(struct can_filter); if (count > 1) { /* filter does not fit into dfilter => alloc space */ filter = memdup_sockptr(optval, optlen); if (IS_ERR(filter)) return PTR_ERR(filter); } else if (count == 1) { if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter))) return -EFAULT; } rtnl_lock(); lock_sock(sk); dev = ro->dev; if (ro->bound && dev) { if (dev->reg_state != NETREG_REGISTERED) { if (count > 1) kfree(filter); err = -ENODEV; goto out_fil; } } if (ro->bound) { /* (try to) register the new filters */ if (count == 1) err = raw_enable_filters(sock_net(sk), dev, sk, &sfilter, 1); else err = raw_enable_filters(sock_net(sk), dev, sk, filter, count); if (err) { if (count > 1) kfree(filter); goto out_fil; } /* remove old filter registrations */ raw_disable_filters(sock_net(sk), dev, sk, ro->filter, ro->count); } /* remove old filter space */ if (ro->count > 1) kfree(ro->filter); /* link new filters to the socket */ if (count == 1) { /* copy filter data for single filter */ ro->dfilter = sfilter; filter = &ro->dfilter; } ro->filter = filter; ro->count = count; out_fil: release_sock(sk); rtnl_unlock(); break; case CAN_RAW_ERR_FILTER: if (optlen != sizeof(err_mask)) return -EINVAL; if (copy_from_sockptr(&err_mask, optval, optlen)) return -EFAULT; err_mask &= CAN_ERR_MASK; rtnl_lock(); lock_sock(sk); dev = ro->dev; if (ro->bound && dev) { if (dev->reg_state != NETREG_REGISTERED) { err = -ENODEV; goto out_err; } } /* remove current error mask */ if (ro->bound) { /* (try to) register the new err_mask */ err = raw_enable_errfilter(sock_net(sk), dev, sk, err_mask); if (err) goto out_err; /* remove old err_mask registration */ raw_disable_errfilter(sock_net(sk), dev, sk, ro->err_mask); } /* link new err_mask to the socket */ ro->err_mask = err_mask; out_err: release_sock(sk); rtnl_unlock(); break; case CAN_RAW_LOOPBACK: if (optlen != sizeof(flag)) return -EINVAL; if (copy_from_sockptr(&flag, optval, optlen)) return -EFAULT; ro->loopback = !!flag; break; case CAN_RAW_RECV_OWN_MSGS: if (optlen != sizeof(flag)) return -EINVAL; if (copy_from_sockptr(&flag, optval, optlen)) return -EFAULT; ro->recv_own_msgs = !!flag; break; case CAN_RAW_FD_FRAMES: if (optlen != sizeof(flag)) return -EINVAL; if (copy_from_sockptr(&flag, optval, optlen)) return -EFAULT; /* Enabling CAN XL includes CAN FD */ if (ro->xl_frames && !flag) return -EINVAL; ro->fd_frames = !!flag; break; case CAN_RAW_XL_FRAMES: if (optlen != sizeof(flag)) return -EINVAL; if (copy_from_sockptr(&flag, optval, optlen)) return -EFAULT; ro->xl_frames = !!flag; /* Enabling CAN XL includes CAN FD */ if (ro->xl_frames) ro->fd_frames 
= ro->xl_frames; break; case CAN_RAW_XL_VCID_OPTS: if (optlen != sizeof(ro->raw_vcid_opts)) return -EINVAL; if (copy_from_sockptr(&ro->raw_vcid_opts, optval, optlen)) return -EFAULT; /* prepare 32 bit values for handling in hot path */ ro->tx_vcid_shifted = ro->raw_vcid_opts.tx_vcid << CANXL_VCID_OFFSET; ro->rx_vcid_shifted = ro->raw_vcid_opts.rx_vcid << CANXL_VCID_OFFSET; ro->rx_vcid_mask_shifted = ro->raw_vcid_opts.rx_vcid_mask << CANXL_VCID_OFFSET; break; case CAN_RAW_JOIN_FILTERS: if (optlen != sizeof(flag)) return -EINVAL; if (copy_from_sockptr(&flag, optval, optlen)) return -EFAULT; ro->join_filters = !!flag; break; default: return -ENOPROTOOPT; } return err; } static int raw_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); int flag; int len; void *val; if (level != SOL_CAN_RAW) return -EINVAL; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case CAN_RAW_FILTER: { int err = 0; lock_sock(sk); if (ro->count > 0) { int fsize = ro->count * sizeof(struct can_filter); /* user space buffer too small for filter list? */ if (len < fsize) { /* return -ERANGE and needed space in optlen */ err = -ERANGE; if (put_user(fsize, optlen)) err = -EFAULT; } else { if (len > fsize) len = fsize; if (copy_to_user(optval, ro->filter, len)) err = -EFAULT; } } else { len = 0; } release_sock(sk); if (!err) err = put_user(len, optlen); return err; } case CAN_RAW_ERR_FILTER: if (len > sizeof(can_err_mask_t)) len = sizeof(can_err_mask_t); val = &ro->err_mask; break; case CAN_RAW_LOOPBACK: if (len > sizeof(int)) len = sizeof(int); flag = ro->loopback; val = &flag; break; case CAN_RAW_RECV_OWN_MSGS: if (len > sizeof(int)) len = sizeof(int); flag = ro->recv_own_msgs; val = &flag; break; case CAN_RAW_FD_FRAMES: if (len > sizeof(int)) len = sizeof(int); flag = ro->fd_frames; val = &flag; break; case CAN_RAW_XL_FRAMES: if (len > sizeof(int)) len = sizeof(int); flag = ro->xl_frames; val = &flag; break; case CAN_RAW_XL_VCID_OPTS: { int err = 0; /* user space buffer too small for VCID opts?
*/ if (len < sizeof(ro->raw_vcid_opts)) { /* return -ERANGE and needed space in optlen */ err = -ERANGE; if (put_user(sizeof(ro->raw_vcid_opts), optlen)) err = -EFAULT; } else { if (len > sizeof(ro->raw_vcid_opts)) len = sizeof(ro->raw_vcid_opts); if (copy_to_user(optval, &ro->raw_vcid_opts, len)) err = -EFAULT; } if (!err) err = put_user(len, optlen); return err; } case CAN_RAW_JOIN_FILTERS: if (len > sizeof(int)) len = sizeof(int); flag = ro->join_filters; val = &flag; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, val, len)) return -EFAULT; return 0; } static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb) { struct canxl_frame *cxl = (struct canxl_frame *)skb->data; /* sanitize non CAN XL bits */ cxl->prio &= (CANXL_PRIO_MASK | CANXL_VCID_MASK); /* clear VCID in CAN XL frame if pass through is disabled */ if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS)) cxl->prio &= CANXL_PRIO_MASK; /* set VCID in CAN XL frame if enabled */ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) { cxl->prio &= CANXL_PRIO_MASK; cxl->prio |= ro->tx_vcid_shifted; } } static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu) { /* Classical CAN -> no checks for flags and device capabilities */ if (can_is_can_skb(skb)) return CAN_MTU; /* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */ if (ro->fd_frames && can_is_canfd_skb(skb) && (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu))) return CANFD_MTU; /* CAN XL -> needs to be enabled and a CAN XL device */ if (ro->xl_frames && can_is_canxl_skb(skb) && can_is_canxl_dev_mtu(mtu)) return CANXL_MTU; return 0; } static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); struct sockcm_cookie sockc; struct sk_buff *skb; struct net_device *dev; unsigned int txmtu; int ifindex; int err = -EINVAL; /* check for valid CAN frame sizes */ if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU) return -EINVAL; if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name); if (msg->msg_namelen < RAW_MIN_NAMELEN) return -EINVAL; if (addr->can_family != AF_CAN) return -EINVAL; ifindex = addr->can_ifindex; } else { ifindex = ro->ifindex; } dev = dev_get_by_index(sock_net(sk), ifindex); if (!dev) return -ENXIO; skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv), msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto put_dev; can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; /* fill the skb before testing for valid CAN frames */ err = memcpy_from_msg(skb_put(skb, size), msg, size); if (err < 0) goto free_skb; err = -EINVAL; /* check for valid CAN (CC/FD/XL) frame content */ txmtu = raw_check_txframe(ro, skb, READ_ONCE(dev->mtu)); if (!txmtu) goto free_skb; /* only CANXL: clear/forward/set VCID value */ if (txmtu == CANXL_MTU) raw_put_canxl_vcid(ro, skb); sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto free_skb; } skb->dev = dev; skb->priority = sockc.priority; skb->mark = sockc.mark; skb->tstamp = sockc.transmit_time; skb_setup_tx_timestamp(skb, &sockc); err = can_send(skb, ro->loopback); dev_put(dev); if (err) goto send_failed; return size; free_skb: kfree_skb(skb); put_dev: dev_put(dev); send_failed: return err; } static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = 
sock->sk; struct sk_buff *skb; int err = 0; if (flags & MSG_ERRQUEUE) return sock_recv_errqueue(sk, msg, size, SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE); skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; if (size < skb->len) msg->msg_flags |= MSG_TRUNC; else size = skb->len; err = memcpy_to_msg(msg, skb->data, size); if (err < 0) { skb_free_datagram(sk, skb); return err; } sock_recv_cmsgs(msg, sk, skb); if (msg->msg_name) { __sockaddr_check_size(RAW_MIN_NAMELEN); msg->msg_namelen = RAW_MIN_NAMELEN; memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } /* assign the flags that have been recorded in raw_rcv() */ msg->msg_flags |= *(raw_flags(skb)); skb_free_datagram(sk, skb); return size; } static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd, unsigned long arg) { /* no ioctls for socket layer -> hand it down to NIC layer */ return -ENOIOCTLCMD; } static const struct proto_ops raw_ops = { .family = PF_CAN, .release = raw_release, .bind = raw_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = raw_getname, .poll = datagram_poll, .ioctl = raw_sock_no_ioctlcmd, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = raw_setsockopt, .getsockopt = raw_getsockopt, .sendmsg = raw_sendmsg, .recvmsg = raw_recvmsg, .mmap = sock_no_mmap, }; static struct proto raw_proto __read_mostly = { .name = "CAN_RAW", .owner = THIS_MODULE, .obj_size = sizeof(struct raw_sock), .init = raw_init, }; static const struct can_proto raw_can_proto = { .type = SOCK_RAW, .protocol = CAN_RAW, .ops = &raw_ops, .prot = &raw_proto, }; static struct notifier_block canraw_notifier = { .notifier_call = raw_notifier }; static __init int raw_module_init(void) { int err; pr_info("can: raw protocol\n"); err = register_netdevice_notifier(&canraw_notifier); if (err) return err; err = can_proto_register(&raw_can_proto); if (err < 0) { pr_err("can: registration of raw protocol failed\n"); goto register_proto_failed; } return 0; register_proto_failed: unregister_netdevice_notifier(&canraw_notifier); return err; } static __exit void raw_module_exit(void) { can_proto_unregister(&raw_can_proto); unregister_netdevice_notifier(&canraw_notifier); } module_init(raw_module_init); module_exit(raw_module_exit);
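/*
 * Illustrative user space sketch (not part of the kernel source above):
 * receiving classic CAN frames through the CAN_RAW protocol implemented
 * in this file. The interface name "can0" and the CAN ID 0x123 are
 * assumptions; the socket calls exercise raw_bind(), raw_setsockopt()
 * and raw_recvmsg(). Error handling is trimmed to the essentials.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { 0 };
	struct can_filter rfilter[1];
	struct can_frame frame;
	struct ifreq ifr;
	int s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	/* replace the default "receive everything" filter set by raw_init() */
	rfilter[0].can_id = 0x123;
	rfilter[0].can_mask = CAN_SFF_MASK;
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));

	strcpy(ifr.ifr_name, "can0");		/* assumed interface name */
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;

	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* raw_recvmsg() delivers the frame and the sending ifindex */
	if (read(s, &frame, sizeof(frame)) == sizeof(frame))
		printf("0x%x [%d]\n", frame.can_id, frame.len);

	close(s);
	return 0;
}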
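/*
 * A second sketch under the same assumptions ("can0" exists, is up and
 * is CAN FD capable): sending one CAN FD frame with the socket options
 * handled by raw_setsockopt() above. CAN_RAW_FD_FRAMES permits
 * CANFD_MTU writes in raw_check_txframe(); clearing CAN_RAW_LOOPBACK
 * suppresses the local echo that can_send() would otherwise generate.
 */
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int send_one_fd_frame(void)
{
	struct sockaddr_can addr = { 0 };
	struct canfd_frame cfd = { 0 };
	struct ifreq ifr;
	int enable = 1;
	int disable = 0;
	int s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return -1;

	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable, sizeof(enable));
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK, &disable, sizeof(disable));

	strcpy(ifr.ifr_name, "can0");		/* assumed interface name */
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		goto err;

	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;

	cfd.can_id = 0x123;			/* assumed CAN ID */
	cfd.len = 16;
	memset(cfd.data, 0xaa, cfd.len);

	/* a CAN FD frame must be written with CANFD_MTU bytes */
	if (write(s, &cfd, sizeof(cfd)) != sizeof(cfd))
		goto err;

	close(s);
	return 0;
err:
	close(s);
	return -1;
}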
// SPDX-License-Identifier: GPL-2.0+ /* * usbduxsigma.c * Copyright (C) 2011-2015 Bernd
Porr, mail@berndporr.me.uk */ /* * Driver: usbduxsigma * Description: University of Stirling USB DAQ & INCITE Technology Limited * Devices: [ITL] USB-DUX-SIGMA (usbduxsigma) * Author: Bernd Porr <mail@berndporr.me.uk> * Updated: 20 July 2015 * Status: stable */ /* * I must give credit here to Chris Baugher who * wrote the driver for AT-MIO-16d. I used some parts of this * driver. I also must give credits to David Brownell * who supported me with the USB development. * * Note: the raw data from the A/D converter is 24 bit big endian; * anything else is little endian to/from the dux board * * * Revision history: * 0.1: initial version * 0.2: all basic functions implemented, digital I/O only for one port * 0.3: proper vendor ID and driver name * 0.4: fixed D/A voltage range * 0.5: various bug fixes, health check at startup * 0.6: corrected wrong input range * 0.7: rewrote code so that urb->interval is always 1 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/fcntl.h> #include <linux/compiler.h> #include <linux/unaligned.h> #include <linux/comedi/comedi_usb.h> /* timeout for the USB-transfer in ms */ #define BULK_TIMEOUT 1000 /* constants for "firmware" upload and download */ #define FIRMWARE "usbduxsigma_firmware.bin" #define FIRMWARE_MAX_LEN 0x4000 #define USBDUXSUB_FIRMWARE 0xa0 #define VENDOR_DIR_IN 0xc0 #define VENDOR_DIR_OUT 0x40 /* internal addresses of the 8051 processor */ #define USBDUXSUB_CPUCS 0xE600 /* 300 Hz max frequency under PWM */ #define MIN_PWM_PERIOD ((long)(1E9 / 300)) /* Default PWM frequency */ #define PWM_DEFAULT_PERIOD ((long)(1E9 / 100)) /* Number of channels (16 AD and offset) */ #define NUMCHANNELS 16 /* Size of one A/D value */ #define SIZEADIN ((sizeof(u32))) /* * Size of the async input-buffer IN BYTES, the DIO state is transmitted * as the first byte. */ #define SIZEINBUF (((NUMCHANNELS + 1) * SIZEADIN)) /* 16 bytes. */ #define SIZEINSNBUF 16 /* Number of DA channels */ #define NUMOUTCHANNELS 8 /* size of one value for the D/A converter: channel and value */ #define SIZEDAOUT ((sizeof(u8) + sizeof(uint16_t))) /* * Size of the output-buffer in bytes. * Actually only the first 4 triplets are used but for the * high speed mode we need to pad it to 8 (microframes). */ #define SIZEOUTBUF ((8 * SIZEDAOUT)) /* * Size of the buffer for the dux commands: just now max size is determined * by the analogue out + command byte + panic bytes...
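 * (i.e. 8 * SIZEDAOUT + 2 = 26 bytes; see the define below)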
*/ #define SIZEOFDUXBUFFER ((8 * SIZEDAOUT + 2)) /* Number of in-URBs which receive the data: min=2 */ #define NUMOFINBUFFERSFULL 5 /* Number of out-URBs which send the data: min=2 */ #define NUMOFOUTBUFFERSFULL 5 /* Number of in-URBs which receive the data: min=5 */ /* must have more buffers due to buggy USB ctr */ #define NUMOFINBUFFERSHIGH 10 /* Number of out-URBs which send the data: min=5 */ /* must have more buffers due to buggy USB ctr */ #define NUMOFOUTBUFFERSHIGH 10 /* number of retries to get the right dux command */ #define RETRIES 10 /* bulk transfer commands to usbduxsigma */ #define USBBUXSIGMA_AD_CMD 9 #define USBDUXSIGMA_DA_CMD 1 #define USBDUXSIGMA_DIO_CFG_CMD 2 #define USBDUXSIGMA_DIO_BITS_CMD 3 #define USBDUXSIGMA_SINGLE_AD_CMD 4 #define USBDUXSIGMA_PWM_ON_CMD 7 #define USBDUXSIGMA_PWM_OFF_CMD 8 static const struct comedi_lrange usbduxsigma_ai_range = { 1, { BIP_RANGE(2.5 * 0x800000 / 0x780000 / 2.0) } }; struct usbduxsigma_private { /* actual number of in-buffers */ int n_ai_urbs; /* actual number of out-buffers */ int n_ao_urbs; /* ISO-transfer handling: buffers */ struct urb **ai_urbs; struct urb **ao_urbs; /* pwm-transfer handling */ struct urb *pwm_urb; /* PWM period */ unsigned int pwm_period; /* PWM internal delay for the GPIF in the FX2 */ u8 pwm_delay; /* size of the PWM buffer which holds the bit pattern */ int pwm_buf_sz; /* input buffer for the ISO-transfer */ __be32 *in_buf; /* input buffer for single insn */ u8 *insn_buf; unsigned high_speed:1; unsigned ai_cmd_running:1; unsigned ao_cmd_running:1; unsigned pwm_cmd_running:1; /* time between samples in units of the timer */ unsigned int ai_timer; unsigned int ao_timer; /* counter between acquisitions */ unsigned int ai_counter; unsigned int ao_counter; /* interval in frames/uframes */ unsigned int ai_interval; /* commands */ u8 *dux_commands; struct mutex mut; }; static void usbduxsigma_unlink_urbs(struct urb **urbs, int num_urbs) { int i; for (i = 0; i < num_urbs; i++) usb_kill_urb(urbs[i]); } static void usbduxsigma_ai_stop(struct comedi_device *dev, int do_unlink) { struct usbduxsigma_private *devpriv = dev->private; if (do_unlink && devpriv->ai_urbs) usbduxsigma_unlink_urbs(devpriv->ai_urbs, devpriv->n_ai_urbs); devpriv->ai_cmd_running = 0; } static int usbduxsigma_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbduxsigma_private *devpriv = dev->private; mutex_lock(&devpriv->mut); /* unlink only if it is really running */ usbduxsigma_ai_stop(dev, devpriv->ai_cmd_running); mutex_unlock(&devpriv->mut); return 0; } static void usbduxsigma_ai_handle_urb(struct comedi_device *dev, struct comedi_subdevice *s, struct urb *urb) { struct usbduxsigma_private *devpriv = dev->private; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; u32 val; int ret; int i; if ((urb->actual_length > 0) && (urb->status != -EXDEV)) { devpriv->ai_counter--; if (devpriv->ai_counter == 0) { devpriv->ai_counter = devpriv->ai_timer; /* * Get the data from the USB bus and hand it over * to comedi. Note, first byte is the DIO state. 
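 * The DIO word is therefore skipped below by reading from in_buf[i + 1].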
*/ for (i = 0; i < cmd->chanlist_len; i++) { val = be32_to_cpu(devpriv->in_buf[i + 1]); val &= 0x00ffffff; /* strip status byte */ val = comedi_offset_munge(s, val); if (!comedi_buf_write_samples(s, &val, 1)) return; } if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) async->events |= COMEDI_CB_EOA; } } /* if command is still running, resubmit urb */ if (!(async->events & COMEDI_CB_CANCEL_MASK)) { urb->dev = comedi_to_usb_dev(dev); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(dev->class_dev, "urb resubmit failed (%d)\n", ret); if (ret == -EL2NSYNC) dev_err(dev->class_dev, "buggy USB host controller or bug in IRQ handler\n"); async->events |= COMEDI_CB_ERROR; } } } static void usbduxsigma_ai_urb_complete(struct urb *urb) { struct comedi_device *dev = urb->context; struct usbduxsigma_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; /* exit if not running a command, do not resubmit urb */ if (!devpriv->ai_cmd_running) return; switch (urb->status) { case 0: /* copy the result into the transfer buffer */ memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF); usbduxsigma_ai_handle_urb(dev, s, urb); break; case -EILSEQ: /* * error in the isochronous data: * we don't copy the data into the transfer buffer * but recycle the last data byte */ dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n"); usbduxsigma_ai_handle_urb(dev, s, urb); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* happens after an unlink command */ async->events |= COMEDI_CB_ERROR; break; default: /* a real error */ dev_err(dev->class_dev, "non-zero urb status (%d)\n", urb->status); async->events |= COMEDI_CB_ERROR; break; } /* * comedi_handle_events() cannot be used in this driver. The (*cancel) * operation would unlink the urb.
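 * Instead, the command is stopped here without unlinking and
 * comedi_event() is called directly.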
*/ if (async->events & COMEDI_CB_CANCEL_MASK) usbduxsigma_ai_stop(dev, 0); comedi_event(dev, s); } static void usbduxsigma_ao_stop(struct comedi_device *dev, int do_unlink) { struct usbduxsigma_private *devpriv = dev->private; if (do_unlink && devpriv->ao_urbs) usbduxsigma_unlink_urbs(devpriv->ao_urbs, devpriv->n_ao_urbs); devpriv->ao_cmd_running = 0; } static int usbduxsigma_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbduxsigma_private *devpriv = dev->private; mutex_lock(&devpriv->mut); /* unlink only if it is really running */ usbduxsigma_ao_stop(dev, devpriv->ao_cmd_running); mutex_unlock(&devpriv->mut); return 0; } static void usbduxsigma_ao_handle_urb(struct comedi_device *dev, struct comedi_subdevice *s, struct urb *urb) { struct usbduxsigma_private *devpriv = dev->private; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; u8 *datap; int ret; int i; devpriv->ao_counter--; if (devpriv->ao_counter == 0) { devpriv->ao_counter = devpriv->ao_timer; if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) { async->events |= COMEDI_CB_EOA; return; } /* transmit data to the USB bus */ datap = urb->transfer_buffer; *datap++ = cmd->chanlist_len; for (i = 0; i < cmd->chanlist_len; i++) { unsigned int chan = CR_CHAN(cmd->chanlist[i]); unsigned short val; if (!comedi_buf_read_samples(s, &val, 1)) { dev_err(dev->class_dev, "buffer underflow\n"); async->events |= COMEDI_CB_OVERFLOW; return; } *datap++ = val; *datap++ = chan; s->readback[chan] = val; } } /* if command is still running, resubmit urb */ if (!(async->events & COMEDI_CB_CANCEL_MASK)) { urb->transfer_buffer_length = SIZEOUTBUF; urb->dev = comedi_to_usb_dev(dev); urb->status = 0; urb->interval = 1; /* (u)frames */ urb->number_of_packets = 1; urb->iso_frame_desc[0].offset = 0; urb->iso_frame_desc[0].length = SIZEOUTBUF; urb->iso_frame_desc[0].status = 0; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(dev->class_dev, "urb resubmit failed (%d)\n", ret); if (ret == -EL2NSYNC) dev_err(dev->class_dev, "buggy USB host controller or bug in IRQ handler\n"); async->events |= COMEDI_CB_ERROR; } } } static void usbduxsigma_ao_urb_complete(struct urb *urb) { struct comedi_device *dev = urb->context; struct usbduxsigma_private *devpriv = dev->private; struct comedi_subdevice *s = dev->write_subdev; struct comedi_async *async = s->async; /* exit if not running a command, do not resubmit urb */ if (!devpriv->ao_cmd_running) return; switch (urb->status) { case 0: usbduxsigma_ao_handle_urb(dev, s, urb); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* happens after an unlink command */ async->events |= COMEDI_CB_ERROR; break; default: /* a real error */ dev_err(dev->class_dev, "non-zero urb status (%d)\n", urb->status); async->events |= COMEDI_CB_ERROR; break; } /* * comedi_handle_events() cannot be used in this driver. The (*cancel) * operation would unlink the urb. */ if (async->events & COMEDI_CB_CANCEL_MASK) usbduxsigma_ao_stop(dev, 0); comedi_event(dev, s); } static int usbduxsigma_submit_urbs(struct comedi_device *dev, struct urb **urbs, int num_urbs, int input_urb) { struct usb_device *usb = comedi_to_usb_dev(dev); struct urb *urb; int ret; int i; /* Submit all URBs and start the transfer on the bus */ for (i = 0; i < num_urbs; i++) { urb = urbs[i]; /* in case of a resubmission after an unlink... 
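 * all transfer parameters are re-initialised before each submission.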
*/ if (input_urb) urb->interval = 1; urb->context = dev; urb->dev = usb; urb->status = 0; urb->transfer_flags = URB_ISO_ASAP; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) return ret; } return 0; } static int usbduxsigma_chans_to_interval(int num_chan) { if (num_chan <= 2) return 2; /* 4kHz */ if (num_chan <= 8) return 4; /* 2kHz */ return 8; /* 1kHz */ } static int usbduxsigma_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { struct usbduxsigma_private *devpriv = dev->private; int high_speed = devpriv->high_speed; int interval = usbduxsigma_chans_to_interval(cmd->chanlist_len); unsigned int tmp; int err = 0; /* Step 1 : check if triggers are trivially valid */ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT); err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER); err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW); err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= comedi_check_trigger_is_unique(cmd->start_src); err |= comedi_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0); if (high_speed) { /* * In high speed mode microframes are possible. * However, during one microframe we can roughly * sample two channels. Thus, the more channels * are in the channel list the more time we need. */ err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, (125000 * interval)); } else { /* full speed */ /* 1kHz scans every USB frame */ err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000); } err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); else /* TRIG_NONE */ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* Step 4: fix up any arguments */ tmp = rounddown(cmd->scan_begin_arg, high_speed ? 125000 : 1000000); err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp); if (err) return 4; return 0; } /* * creates the ADC command for the MAX1271 * range is the range value from comedi */ static void create_adc_command(unsigned int chan, u8 *muxsg0, u8 *muxsg1) { if (chan < 8) (*muxsg0) = (*muxsg0) | (1 << chan); else if (chan < 16) (*muxsg1) = (*muxsg1) | (1 << (chan - 8)); } static int usbbuxsigma_send_cmd(struct comedi_device *dev, int cmd_type) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbduxsigma_private *devpriv = dev->private; int nsent; devpriv->dux_commands[0] = cmd_type; return usb_bulk_msg(usb, usb_sndbulkpipe(usb, 1), devpriv->dux_commands, SIZEOFDUXBUFFER, &nsent, BULK_TIMEOUT); } static int usbduxsigma_receive_cmd(struct comedi_device *dev, int command) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbduxsigma_private *devpriv = dev->private; int nrec; int ret; int i; for (i = 0; i < RETRIES; i++) { ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, 8), devpriv->insn_buf, SIZEINSNBUF, &nrec, BULK_TIMEOUT); if (ret < 0) return ret; if (devpriv->insn_buf[0] == command) return 0; } /* * This is only reached if the data has been requested a * couple of times and the command was not received. 
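 * (i.e. all RETRIES bulk reads returned a different command byte).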
*/ return -EFAULT; } static int usbduxsigma_ai_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { struct usbduxsigma_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int ret; if (trig_num != cmd->start_arg) return -EINVAL; mutex_lock(&devpriv->mut); if (!devpriv->ai_cmd_running) { devpriv->ai_cmd_running = 1; ret = usbduxsigma_submit_urbs(dev, devpriv->ai_urbs, devpriv->n_ai_urbs, 1); if (ret < 0) { devpriv->ai_cmd_running = 0; mutex_unlock(&devpriv->mut); return ret; } s->async->inttrig = NULL; } mutex_unlock(&devpriv->mut); return 1; } static int usbduxsigma_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbduxsigma_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; unsigned int len = cmd->chanlist_len; u8 muxsg0 = 0; u8 muxsg1 = 0; u8 sysred = 0; int ret; int i; mutex_lock(&devpriv->mut); if (devpriv->high_speed) { /* * every 2 channels get a time window of 125us. Thus, if we * sample all 16 channels we need 1ms. If we sample only one * channel we need only 125us */ unsigned int interval = usbduxsigma_chans_to_interval(len); devpriv->ai_interval = interval; devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval); } else { /* interval always 1ms */ devpriv->ai_interval = 1; devpriv->ai_timer = cmd->scan_begin_arg / 1000000; } for (i = 0; i < len; i++) { unsigned int chan = CR_CHAN(cmd->chanlist[i]); create_adc_command(chan, &muxsg0, &muxsg1); } devpriv->dux_commands[1] = devpriv->ai_interval; devpriv->dux_commands[2] = len; /* num channels per time step */ devpriv->dux_commands[3] = 0x12; /* CONFIG0 */ devpriv->dux_commands[4] = 0x03; /* CONFIG1: 23kHz sample, delay 0us */ devpriv->dux_commands[5] = 0x00; /* CONFIG3: diff. channels off */ devpriv->dux_commands[6] = muxsg0; devpriv->dux_commands[7] = muxsg1; devpriv->dux_commands[8] = sysred; ret = usbbuxsigma_send_cmd(dev, USBBUXSIGMA_AD_CMD); if (ret < 0) { mutex_unlock(&devpriv->mut); return ret; } devpriv->ai_counter = devpriv->ai_timer; if (cmd->start_src == TRIG_NOW) { /* enable this acquisition operation */ devpriv->ai_cmd_running = 1; ret = usbduxsigma_submit_urbs(dev, devpriv->ai_urbs, devpriv->n_ai_urbs, 1); if (ret < 0) { devpriv->ai_cmd_running = 0; mutex_unlock(&devpriv->mut); return ret; } s->async->inttrig = NULL; } else { /* TRIG_INT */ s->async->inttrig = usbduxsigma_ai_inttrig; } mutex_unlock(&devpriv->mut); return 0; } static int usbduxsigma_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbduxsigma_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); u8 muxsg0 = 0; u8 muxsg1 = 0; u8 sysred = 0; int ret; int i; mutex_lock(&devpriv->mut); if (devpriv->ai_cmd_running) { mutex_unlock(&devpriv->mut); return -EBUSY; } create_adc_command(chan, &muxsg0, &muxsg1); /* Mode 0 is used to get a single conversion on demand */ devpriv->dux_commands[1] = 0x16; /* CONFIG0: chopper on */ devpriv->dux_commands[2] = 0x80; /* CONFIG1: 2kHz sampling rate */ devpriv->dux_commands[3] = 0x00; /* CONFIG3: diff. 
channels off */ devpriv->dux_commands[4] = muxsg0; devpriv->dux_commands[5] = muxsg1; devpriv->dux_commands[6] = sysred; /* adc commands */ ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD); if (ret < 0) { mutex_unlock(&devpriv->mut); return ret; } for (i = 0; i < insn->n; i++) { u32 val; ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD); if (ret < 0) { mutex_unlock(&devpriv->mut); return ret; } /* 32 bits big endian from the A/D converter */ val = be32_to_cpu(get_unaligned((__be32 *)(devpriv->insn_buf + 1))); val &= 0x00ffffff; /* strip status byte */ data[i] = comedi_offset_munge(s, val); } mutex_unlock(&devpriv->mut); return insn->n; } static int usbduxsigma_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbduxsigma_private *devpriv = dev->private; int ret; mutex_lock(&devpriv->mut); ret = comedi_readback_insn_read(dev, s, insn, data); mutex_unlock(&devpriv->mut); return ret; } static int usbduxsigma_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbduxsigma_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); int ret; int i; mutex_lock(&devpriv->mut); if (devpriv->ao_cmd_running) { mutex_unlock(&devpriv->mut); return -EBUSY; } for (i = 0; i < insn->n; i++) { devpriv->dux_commands[1] = 1; /* num channels */ devpriv->dux_commands[2] = data[i]; /* value */ devpriv->dux_commands[3] = chan; /* channel number */ ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_DA_CMD); if (ret < 0) { mutex_unlock(&devpriv->mut); return ret; } s->readback[chan] = data[i]; } mutex_unlock(&devpriv->mut); return insn->n; } static int usbduxsigma_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { struct usbduxsigma_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int ret; if (trig_num != cmd->start_arg) return -EINVAL; mutex_lock(&devpriv->mut); if (!devpriv->ao_cmd_running) { devpriv->ao_cmd_running = 1; ret = usbduxsigma_submit_urbs(dev, devpriv->ao_urbs, devpriv->n_ao_urbs, 0); if (ret < 0) { devpriv->ao_cmd_running = 0; mutex_unlock(&devpriv->mut); return ret; } s->async->inttrig = NULL; } mutex_unlock(&devpriv->mut); return 1; } static int usbduxsigma_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { unsigned int tmp; int err = 0; /* Step 1 : check if triggers are trivially valid */ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT); /* * For now, always use "scan" timing with all channels updated at once * (cmd->scan_begin_src == TRIG_TIMER, cmd->convert_src == TRIG_NOW). * * In a future version, "convert" timing with channels updated * individually may be supported in high speed mode * (cmd->scan_begin_src == TRIG_FOLLOW, cmd->convert_src == TRIG_TIMER).
*/ err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER); err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW); err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= comedi_check_trigger_is_unique(cmd->start_src); err |= comedi_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0); err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000); err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); else /* TRIG_NONE */ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* Step 4: fix up any arguments */ tmp = rounddown(cmd->scan_begin_arg, 1000000); err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp); if (err) return 4; return 0; } static int usbduxsigma_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbduxsigma_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int ret; mutex_lock(&devpriv->mut); /* * For now, only "scan" timing is supported. A future version may * support "convert" timing in high speed mode. * * Timing of the scan: every 1ms all channels updated at once. */ devpriv->ao_timer = cmd->scan_begin_arg / 1000000; devpriv->ao_counter = devpriv->ao_timer; if (cmd->start_src == TRIG_NOW) { /* enable this acquisition operation */ devpriv->ao_cmd_running = 1; ret = usbduxsigma_submit_urbs(dev, devpriv->ao_urbs, devpriv->n_ao_urbs, 0); if (ret < 0) { devpriv->ao_cmd_running = 0; mutex_unlock(&devpriv->mut); return ret; } s->async->inttrig = NULL; } else { /* TRIG_INT */ s->async->inttrig = usbduxsigma_ao_inttrig; } mutex_unlock(&devpriv->mut); return 0; } static int usbduxsigma_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; ret = comedi_dio_insn_config(dev, s, insn, data, 0); if (ret) return ret; /* * We don't tell the firmware here as it would take 8 frames * to submit the information. We do it in the (*insn_bits). */ return insn->n; } static int usbduxsigma_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbduxsigma_private *devpriv = dev->private; int ret; mutex_lock(&devpriv->mut); comedi_dio_update_state(s, data); /* Always update the hardware. See the (*insn_config).
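 * A single bulk command carries both the direction bits and the
 * output state of all DIO channels.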
*/ devpriv->dux_commands[1] = s->io_bits & 0xff; devpriv->dux_commands[4] = s->state & 0xff; devpriv->dux_commands[2] = (s->io_bits >> 8) & 0xff; devpriv->dux_commands[5] = (s->state >> 8) & 0xff; devpriv->dux_commands[3] = (s->io_bits >> 16) & 0xff; devpriv->dux_commands[6] = (s->state >> 16) & 0xff; ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_DIO_BITS_CMD); if (ret < 0) goto done; ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_DIO_BITS_CMD); if (ret < 0) goto done; s->state = devpriv->insn_buf[1] | (devpriv->insn_buf[2] << 8) | (devpriv->insn_buf[3] << 16); data[1] = s->state; ret = insn->n; done: mutex_unlock(&devpriv->mut); return ret; } static void usbduxsigma_pwm_stop(struct comedi_device *dev, int do_unlink) { struct usbduxsigma_private *devpriv = dev->private; if (do_unlink) { if (devpriv->pwm_urb) usb_kill_urb(devpriv->pwm_urb); } devpriv->pwm_cmd_running = 0; } static int usbduxsigma_pwm_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbduxsigma_private *devpriv = dev->private; /* unlink only if it is really running */ usbduxsigma_pwm_stop(dev, devpriv->pwm_cmd_running); return usbbuxsigma_send_cmd(dev, USBDUXSIGMA_PWM_OFF_CMD); } static void usbduxsigma_pwm_urb_complete(struct urb *urb) { struct comedi_device *dev = urb->context; struct usbduxsigma_private *devpriv = dev->private; int ret; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* happens after an unlink command */ if (devpriv->pwm_cmd_running) usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */ return; default: /* a real error */ if (devpriv->pwm_cmd_running) { dev_err(dev->class_dev, "non-zero urb status (%d)\n", urb->status); usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */ } return; } if (!devpriv->pwm_cmd_running) return; urb->transfer_buffer_length = devpriv->pwm_buf_sz; urb->dev = comedi_to_usb_dev(dev); urb->status = 0; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(dev->class_dev, "urb resubmit failed (%d)\n", ret); if (ret == -EL2NSYNC) dev_err(dev->class_dev, "buggy USB host controller or bug in IRQ handler\n"); usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */ } } static int usbduxsigma_submit_pwm_urb(struct comedi_device *dev) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbduxsigma_private *devpriv = dev->private; struct urb *urb = devpriv->pwm_urb; /* in case of a resubmission after an unlink... 
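 * the bulk urb is completely re-initialised by usb_fill_bulk_urb().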
*/ usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, 4), urb->transfer_buffer, devpriv->pwm_buf_sz, usbduxsigma_pwm_urb_complete, dev); return usb_submit_urb(urb, GFP_ATOMIC); } static int usbduxsigma_pwm_period(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int period) { struct usbduxsigma_private *devpriv = dev->private; int fx2delay; if (period < MIN_PWM_PERIOD) return -EAGAIN; fx2delay = (period / (6 * 512 * 1000 / 33)) - 6; if (fx2delay > 255) return -EAGAIN; devpriv->pwm_delay = fx2delay; devpriv->pwm_period = period; return 0; } static int usbduxsigma_pwm_start(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbduxsigma_private *devpriv = dev->private; int ret; if (devpriv->pwm_cmd_running) return 0; devpriv->dux_commands[1] = devpriv->pwm_delay; ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_PWM_ON_CMD); if (ret < 0) return ret; memset(devpriv->pwm_urb->transfer_buffer, 0, devpriv->pwm_buf_sz); devpriv->pwm_cmd_running = 1; ret = usbduxsigma_submit_pwm_urb(dev); if (ret < 0) { devpriv->pwm_cmd_running = 0; return ret; } return 0; } static void usbduxsigma_pwm_pattern(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int chan, unsigned int value, unsigned int sign) { struct usbduxsigma_private *devpriv = dev->private; char pwm_mask = (1 << chan); /* DIO bit for the PWM data */ char sgn_mask = (16 << chan); /* DIO bit for the sign */ char *buf = (char *)(devpriv->pwm_urb->transfer_buffer); int szbuf = devpriv->pwm_buf_sz; int i; for (i = 0; i < szbuf; i++) { char c = *buf; c &= ~pwm_mask; if (i < value) c |= pwm_mask; if (!sign) c &= ~sgn_mask; else c |= sgn_mask; *buf++ = c; } } static int usbduxsigma_pwm_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); /* * It doesn't make sense to support more than one value here * because it would just overwrite the PWM buffer. */ if (insn->n != 1) return -EINVAL; /* * The sign is set via a special INSN only, this gives us 8 bits * for normal operation, sign is 0 by default. 
*/ usbduxsigma_pwm_pattern(dev, s, chan, data[0], 0); return insn->n; } static int usbduxsigma_pwm_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbduxsigma_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); switch (data[0]) { case INSN_CONFIG_ARM: /* * if not zero the PWM is limited to a certain time which is * not supported here */ if (data[1] != 0) return -EINVAL; return usbduxsigma_pwm_start(dev, s); case INSN_CONFIG_DISARM: return usbduxsigma_pwm_cancel(dev, s); case INSN_CONFIG_GET_PWM_STATUS: data[1] = devpriv->pwm_cmd_running; return 0; case INSN_CONFIG_PWM_SET_PERIOD: return usbduxsigma_pwm_period(dev, s, data[1]); case INSN_CONFIG_PWM_GET_PERIOD: data[1] = devpriv->pwm_period; return 0; case INSN_CONFIG_PWM_SET_H_BRIDGE: /* * data[1] = value * data[2] = sign (for a relay) */ usbduxsigma_pwm_pattern(dev, s, chan, data[1], (data[2] != 0)); return 0; case INSN_CONFIG_PWM_GET_H_BRIDGE: /* values are not kept in this driver, nothing to return */ return -EINVAL; } return -EINVAL; } static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan) { struct comedi_subdevice *s = dev->read_subdev; struct usbduxsigma_private *devpriv = dev->private; u8 sysred; u32 val; int ret; switch (chan) { default: case 0: sysred = 0; /* ADC zero */ break; case 1: sysred = 1; /* ADC offset */ break; case 2: sysred = 4; /* VCC */ break; case 3: sysred = 8; /* temperature */ break; case 4: sysred = 16; /* gain */ break; case 5: sysred = 32; /* ref */ break; } devpriv->dux_commands[1] = 0x12; /* CONFIG0 */ devpriv->dux_commands[2] = 0x80; /* CONFIG1: 2kHz sampling rate */ devpriv->dux_commands[3] = 0x00; /* CONFIG3: diff. channels off */ devpriv->dux_commands[4] = 0; devpriv->dux_commands[5] = 0; devpriv->dux_commands[6] = sysred; ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD); if (ret < 0) return ret; ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD); if (ret < 0) return ret; /* 32 bits big endian from the A/D converter */ val = be32_to_cpu(get_unaligned((__be32 *)(devpriv->insn_buf + 1))); val &= 0x00ffffff; /* strip status byte */ return (int)comedi_offset_munge(s, val); } static int usbduxsigma_firmware_upload(struct comedi_device *dev, const u8 *data, size_t size, unsigned long context) { struct usb_device *usb = comedi_to_usb_dev(dev); u8 *buf; u8 *tmp; int ret; if (!data) return 0; if (size > FIRMWARE_MAX_LEN) { dev_err(dev->class_dev, "firmware binary too large for FX2\n"); return -ENOMEM; } /* we generate a local buffer for the firmware */ buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; /* we need a malloc'ed buffer for usb_control_msg() */ tmp = kmalloc(1, GFP_KERNEL); if (!tmp) { kfree(buf); return -ENOMEM; } /* stop the current firmware on the device */ *tmp = 1; /* 7f92 to one */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUXSUB_FIRMWARE, VENDOR_DIR_OUT, USBDUXSUB_CPUCS, 0x0000, tmp, 1, BULK_TIMEOUT); if (ret < 0) { dev_err(dev->class_dev, "can not stop firmware\n"); goto done; } /* upload the new firmware to the device */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUXSUB_FIRMWARE, VENDOR_DIR_OUT, 0, 0x0000, buf, size, BULK_TIMEOUT); if (ret < 0) { dev_err(dev->class_dev, "firmware upload failed\n"); goto done; } /* start the new firmware on the device */ *tmp = 0; /* 7f92 to zero */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUXSUB_FIRMWARE, VENDOR_DIR_OUT, USBDUXSUB_CPUCS, 0x0000, tmp, 1, BULK_TIMEOUT); if (ret 
< 0) dev_err(dev->class_dev, "can not start firmware\n"); done: kfree(tmp); kfree(buf); return ret; } static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbduxsigma_private *devpriv = dev->private; struct urb *urb; int i; devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL); devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL); devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL); devpriv->ai_urbs = kcalloc(devpriv->n_ai_urbs, sizeof(urb), GFP_KERNEL); devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(urb), GFP_KERNEL); if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf || !devpriv->ai_urbs || !devpriv->ao_urbs) return -ENOMEM; for (i = 0; i < devpriv->n_ai_urbs; i++) { /* one frame: 1ms */ urb = usb_alloc_urb(1, GFP_KERNEL); if (!urb) return -ENOMEM; devpriv->ai_urbs[i] = urb; urb->dev = usb; /* will be filled later with a pointer to the comedi-device */ /* and ONLY then the urb should be submitted */ urb->context = NULL; urb->pipe = usb_rcvisocpipe(usb, 6); urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL); if (!urb->transfer_buffer) return -ENOMEM; urb->complete = usbduxsigma_ai_urb_complete; urb->number_of_packets = 1; urb->transfer_buffer_length = SIZEINBUF; urb->iso_frame_desc[0].offset = 0; urb->iso_frame_desc[0].length = SIZEINBUF; } for (i = 0; i < devpriv->n_ao_urbs; i++) { /* one frame: 1ms */ urb = usb_alloc_urb(1, GFP_KERNEL); if (!urb) return -ENOMEM; devpriv->ao_urbs[i] = urb; urb->dev = usb; /* will be filled later with a pointer to the comedi-device */ /* and ONLY then the urb should be submitted */ urb->context = NULL; urb->pipe = usb_sndisocpipe(usb, 2); urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL); if (!urb->transfer_buffer) return -ENOMEM; urb->complete = usbduxsigma_ao_urb_complete; urb->number_of_packets = 1; urb->transfer_buffer_length = SIZEOUTBUF; urb->iso_frame_desc[0].offset = 0; urb->iso_frame_desc[0].length = SIZEOUTBUF; urb->interval = 1; /* (u)frames */ } if (devpriv->pwm_buf_sz) { urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; devpriv->pwm_urb = urb; urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz, GFP_KERNEL); if (!urb->transfer_buffer) return -ENOMEM; } return 0; } static void usbduxsigma_free_usb_buffers(struct comedi_device *dev) { struct usbduxsigma_private *devpriv = dev->private; struct urb *urb; int i; urb = devpriv->pwm_urb; if (urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } if (devpriv->ao_urbs) { for (i = 0; i < devpriv->n_ao_urbs; i++) { urb = devpriv->ao_urbs[i]; if (urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } } kfree(devpriv->ao_urbs); } if (devpriv->ai_urbs) { for (i = 0; i < devpriv->n_ai_urbs; i++) { urb = devpriv->ai_urbs[i]; if (urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } } kfree(devpriv->ai_urbs); } kfree(devpriv->insn_buf); kfree(devpriv->in_buf); kfree(devpriv->dux_commands); } static int usbduxsigma_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct usb_device *usb = comedi_to_usb_dev(dev); struct usbduxsigma_private *devpriv; struct comedi_subdevice *s; int offset; int ret; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; mutex_init(&devpriv->mut); usb_set_intfdata(intf, devpriv); devpriv->high_speed = (usb->speed == USB_SPEED_HIGH); if (devpriv->high_speed) { 
devpriv->n_ai_urbs = NUMOFINBUFFERSHIGH; devpriv->n_ao_urbs = NUMOFOUTBUFFERSHIGH; devpriv->pwm_buf_sz = 512; } else { devpriv->n_ai_urbs = NUMOFINBUFFERSFULL; devpriv->n_ao_urbs = NUMOFOUTBUFFERSFULL; } ret = usbduxsigma_alloc_usb_buffers(dev); if (ret) return ret; /* setting to alternate setting 3: enabling iso ep and bulk ep. */ ret = usb_set_interface(usb, intf->altsetting->desc.bInterfaceNumber, 3); if (ret < 0) { dev_err(dev->class_dev, "could not set alternate setting 3 in high speed\n"); return ret; } ret = comedi_load_firmware(dev, &usb->dev, FIRMWARE, usbduxsigma_firmware_upload, 0); if (ret) return ret; ret = comedi_alloc_subdevices(dev, (devpriv->high_speed) ? 4 : 3); if (ret) return ret; /* Analog Input subdevice */ s = &dev->subdevices[0]; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ | SDF_LSAMPL; s->n_chan = NUMCHANNELS; s->len_chanlist = NUMCHANNELS; s->maxdata = 0x00ffffff; s->range_table = &usbduxsigma_ai_range; s->insn_read = usbduxsigma_ai_insn_read; s->do_cmdtest = usbduxsigma_ai_cmdtest; s->do_cmd = usbduxsigma_ai_cmd; s->cancel = usbduxsigma_ai_cancel; /* Analog Output subdevice */ s = &dev->subdevices[1]; dev->write_subdev = s; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE; s->n_chan = 4; s->len_chanlist = s->n_chan; s->maxdata = 0x00ff; s->range_table = &range_unipolar2_5; s->insn_write = usbduxsigma_ao_insn_write; s->insn_read = usbduxsigma_ao_insn_read; s->do_cmdtest = usbduxsigma_ao_cmdtest; s->do_cmd = usbduxsigma_ao_cmd; s->cancel = usbduxsigma_ao_cancel; ret = comedi_alloc_subdev_readback(s); if (ret) return ret; /* Digital I/O subdevice */ s = &dev->subdevices[2]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 24; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = usbduxsigma_dio_insn_bits; s->insn_config = usbduxsigma_dio_insn_config; if (devpriv->high_speed) { /* Timer / pwm subdevice */ s = &dev->subdevices[3]; s->type = COMEDI_SUBD_PWM; s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE; s->n_chan = 8; s->maxdata = devpriv->pwm_buf_sz; s->insn_write = usbduxsigma_pwm_write; s->insn_config = usbduxsigma_pwm_config; usbduxsigma_pwm_period(dev, s, PWM_DEFAULT_PERIOD); } offset = usbduxsigma_getstatusinfo(dev, 0); if (offset < 0) { dev_err(dev->class_dev, "Communication to USBDUXSIGMA failed! 
Check firmware and cabling.\n"); return offset; } dev_info(dev->class_dev, "ADC_zero = %x\n", offset); return 0; } static void usbduxsigma_detach(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct usbduxsigma_private *devpriv = dev->private; usb_set_intfdata(intf, NULL); if (!devpriv) return; mutex_lock(&devpriv->mut); /* force unlink all urbs */ usbduxsigma_ai_stop(dev, 1); usbduxsigma_ao_stop(dev, 1); usbduxsigma_pwm_stop(dev, 1); usbduxsigma_free_usb_buffers(dev); mutex_unlock(&devpriv->mut); mutex_destroy(&devpriv->mut); } static struct comedi_driver usbduxsigma_driver = { .driver_name = "usbduxsigma", .module = THIS_MODULE, .auto_attach = usbduxsigma_auto_attach, .detach = usbduxsigma_detach, }; static int usbduxsigma_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return comedi_usb_auto_config(intf, &usbduxsigma_driver, 0); } static const struct usb_device_id usbduxsigma_usb_table[] = { { USB_DEVICE(0x13d8, 0x0020) }, { USB_DEVICE(0x13d8, 0x0021) }, { USB_DEVICE(0x13d8, 0x0022) }, { } }; MODULE_DEVICE_TABLE(usb, usbduxsigma_usb_table); static struct usb_driver usbduxsigma_usb_driver = { .name = "usbduxsigma", .probe = usbduxsigma_usb_probe, .disconnect = comedi_usb_auto_unconfig, .id_table = usbduxsigma_usb_table, }; module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver); MODULE_AUTHOR("Bernd Porr, mail@berndporr.me.uk"); MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- mail@berndporr.me.uk"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE);
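/*
 * [Editor's illustrative sketch -- not part of the driver above; all names
 * below are invented for the demo.] usbduxsigma_pwm_pattern() fills the PWM
 * URB buffer so that each byte is one time slice of the PWM period: bit
 * 'chan' is high for the first 'value' slices and low afterwards, giving a
 * duty cycle of value/pwm_buf_sz, while bit 'chan + 4' carries a constant
 * sign level for H-bridge use. A minimal standalone model of that fill,
 * assuming a small channel number so the sign bit fits in one byte:
 */
#include <stdio.h>
#include <string.h>

/* Mirror of the driver's buffer fill: one byte per PWM time slice */
static void demo_pwm_pattern(unsigned char *buf, int szbuf,
			     unsigned int chan, unsigned int value,
			     unsigned int sign)
{
	unsigned char pwm_mask = 1 << chan;	/* bit carrying the PWM output */
	unsigned char sgn_mask = 16 << chan;	/* bit carrying the sign level */
	int i;

	for (i = 0; i < szbuf; i++) {
		unsigned char c = buf[i];

		c &= ~pwm_mask;
		if (i < (int)value)	/* high for 'value' slices, */
			c |= pwm_mask;	/* low for the remainder */
		if (sign)
			c |= sgn_mask;
		else
			c &= ~sgn_mask;
		buf[i] = c;
	}
}

int main(void)
{
	unsigned char buf[512];	/* 512 matches pwm_buf_sz in high-speed mode */

	memset(buf, 0, sizeof(buf));
	/* 128 of 512 slices high on bit 0 -> 25% duty cycle, sign low */
	demo_pwm_pattern(buf, sizeof(buf), 0, 128, 0);
	printf("slice 0: %#x, slice 200: %#x\n",
	       (unsigned int)buf[0], (unsigned int)buf[200]);
	return 0;
}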
// SPDX-License-Identifier: GPL-2.0 #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <linux/netpoll.h> #include <linux/export.h> #include <net/gro.h> #include "vlan.h" bool vlan_do_receive(struct sk_buff **skbp) { struct sk_buff *skb = *skbp; __be16 vlan_proto = skb->vlan_proto; u16 vlan_id = skb_vlan_tag_get_id(skb); struct net_device *vlan_dev; struct vlan_pcpu_stats *rx_stats; vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id); if (!vlan_dev) return false; skb = *skbp = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return false; if (unlikely(!(vlan_dev->flags & IFF_UP))) { kfree_skb(skb); *skbp = NULL; return false; } skb->dev = vlan_dev; if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { /* Our lower layer thinks this is not local, let's make sure. * This allows the VLAN to have a different MAC than the * underlying device, and still route correctly.
*/ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr)) skb->pkt_type = PACKET_HOST; } if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) && !netif_is_macvlan_port(vlan_dev) && !netif_is_bridge_port(vlan_dev)) { unsigned int offset = skb->data - skb_mac_header(skb); /* * vlan_insert_tag expects skb->data to point to the mac header. * So change skb->data before calling it and change back to * the original position later */ skb_push(skb, offset); skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto, skb->vlan_tci, skb->mac_len); if (!skb) return false; skb_pull(skb, offset + VLAN_HLEN); skb_reset_mac_len(skb); } skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); __vlan_hwaccel_clear_tag(skb); rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats); u64_stats_update_begin(&rx_stats->syncp); u64_stats_inc(&rx_stats->rx_packets); u64_stats_add(&rx_stats->rx_bytes, skb->len); if (skb->pkt_type == PACKET_MULTICAST) u64_stats_inc(&rx_stats->rx_multicast); u64_stats_update_end(&rx_stats->syncp); return true; } /* Must be invoked with rcu_read_lock. */ struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev, __be16 vlan_proto, u16 vlan_id) { struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info); if (vlan_info) { return vlan_group_get_device(&vlan_info->grp, vlan_proto, vlan_id); } else { /* * Lower devices of master uppers (bonding, team) do not have * grp assigned to themselves. Grp is assigned to upper device * instead. */ struct net_device *upper_dev; upper_dev = netdev_master_upper_dev_get_rcu(dev); if (upper_dev) return __vlan_find_dev_deep_rcu(upper_dev, vlan_proto, vlan_id); } return NULL; } EXPORT_SYMBOL(__vlan_find_dev_deep_rcu); struct net_device *vlan_dev_real_dev(const struct net_device *dev) { struct net_device *ret = vlan_dev_priv(dev)->real_dev; while (is_vlan_dev(ret)) ret = vlan_dev_priv(ret)->real_dev; return ret; } EXPORT_SYMBOL(vlan_dev_real_dev); u16 vlan_dev_vlan_id(const struct net_device *dev) { return vlan_dev_priv(dev)->vlan_id; } EXPORT_SYMBOL(vlan_dev_vlan_id); __be16 vlan_dev_vlan_proto(const struct net_device *dev) { return vlan_dev_priv(dev)->vlan_proto; } EXPORT_SYMBOL(vlan_dev_vlan_proto); /* * vlan info and vid list */ static void vlan_group_free(struct vlan_group *grp) { int i, j; for (i = 0; i < VLAN_PROTO_NUM; i++) for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++) kfree(grp->vlan_devices_arrays[i][j]); } static void vlan_info_free(struct vlan_info *vlan_info) { vlan_group_free(&vlan_info->grp); kfree(vlan_info); } static void vlan_info_rcu_free(struct rcu_head *rcu) { vlan_info_free(container_of(rcu, struct vlan_info, rcu)); } static struct vlan_info *vlan_info_alloc(struct net_device *dev) { struct vlan_info *vlan_info; vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL); if (!vlan_info) return NULL; vlan_info->real_dev = dev; INIT_LIST_HEAD(&vlan_info->vid_list); return vlan_info; } struct vlan_vid_info { struct list_head list; __be16 proto; u16 vid; int refcount; }; static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto) { if (proto == htons(ETH_P_8021Q) && dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) return true; if (proto == htons(ETH_P_8021AD) && dev->features & NETIF_F_HW_VLAN_STAG_FILTER) return true; return false; } static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info, __be16 proto, u16 vid) { struct vlan_vid_info *vid_info; list_for_each_entry(vid_info, &vlan_info->vid_list, list) { if (vid_info->proto == proto && vid_info->vid ==
vid) return vid_info; } return NULL; } static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid) { struct vlan_vid_info *vid_info; vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL); if (!vid_info) return NULL; vid_info->proto = proto; vid_info->vid = vid; return vid_info; } static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid) { if (!vlan_hw_filter_capable(dev, proto)) return 0; if (netif_device_present(dev)) return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid); else return -ENODEV; } static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid) { if (!vlan_hw_filter_capable(dev, proto)) return 0; if (netif_device_present(dev)) return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid); else return -ENODEV; } int vlan_for_each(struct net_device *dev, int (*action)(struct net_device *dev, int vid, void *arg), void *arg) { struct vlan_vid_info *vid_info; struct vlan_info *vlan_info; struct net_device *vdev; int ret; ASSERT_RTNL(); vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) return 0; list_for_each_entry(vid_info, &vlan_info->vid_list, list) { vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto, vid_info->vid); ret = action(vdev, vid_info->vid, arg); if (ret) return ret; } return 0; } EXPORT_SYMBOL(vlan_for_each); int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto) { struct net_device *real_dev = vlan_info->real_dev; struct vlan_vid_info *vlan_vid_info; int err; list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) { if (vlan_vid_info->proto == proto) { err = vlan_add_rx_filter_info(real_dev, proto, vlan_vid_info->vid); if (err) goto unwind; } } return 0; unwind: list_for_each_entry_continue_reverse(vlan_vid_info, &vlan_info->vid_list, list) { if (vlan_vid_info->proto == proto) vlan_kill_rx_filter_info(real_dev, proto, vlan_vid_info->vid); } return err; } EXPORT_SYMBOL(vlan_filter_push_vids); void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto) { struct vlan_vid_info *vlan_vid_info; list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) if (vlan_vid_info->proto == proto) vlan_kill_rx_filter_info(vlan_info->real_dev, vlan_vid_info->proto, vlan_vid_info->vid); } EXPORT_SYMBOL(vlan_filter_drop_vids); static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid, struct vlan_vid_info **pvid_info) { struct net_device *dev = vlan_info->real_dev; struct vlan_vid_info *vid_info; int err; vid_info = vlan_vid_info_alloc(proto, vid); if (!vid_info) return -ENOMEM; err = vlan_add_rx_filter_info(dev, proto, vid); if (err) { kfree(vid_info); return err; } list_add(&vid_info->list, &vlan_info->vid_list); vlan_info->nr_vids++; *pvid_info = vid_info; return 0; } int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) { struct vlan_info *vlan_info; struct vlan_vid_info *vid_info; bool vlan_info_created = false; int err; ASSERT_RTNL(); vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) { vlan_info = vlan_info_alloc(dev); if (!vlan_info) return -ENOMEM; vlan_info_created = true; } vid_info = vlan_vid_info_get(vlan_info, proto, vid); if (!vid_info) { err = __vlan_vid_add(vlan_info, proto, vid, &vid_info); if (err) goto out_free_vlan_info; } vid_info->refcount++; if (vlan_info_created) rcu_assign_pointer(dev->vlan_info, vlan_info); return 0; out_free_vlan_info: if (vlan_info_created) kfree(vlan_info); return err; } EXPORT_SYMBOL(vlan_vid_add); static void __vlan_vid_del(struct vlan_info *vlan_info, struct vlan_vid_info 
*vid_info) { struct net_device *dev = vlan_info->real_dev; __be16 proto = vid_info->proto; u16 vid = vid_info->vid; int err; err = vlan_kill_rx_filter_info(dev, proto, vid); if (err && dev->reg_state != NETREG_UNREGISTERING) netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid); list_del(&vid_info->list); kfree(vid_info); vlan_info->nr_vids--; } void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid) { struct vlan_info *vlan_info; struct vlan_vid_info *vid_info; ASSERT_RTNL(); vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) return; vid_info = vlan_vid_info_get(vlan_info, proto, vid); if (!vid_info) return; vid_info->refcount--; if (vid_info->refcount == 0) { __vlan_vid_del(vlan_info, vid_info); if (vlan_info->nr_vids == 0) { RCU_INIT_POINTER(dev->vlan_info, NULL); call_rcu(&vlan_info->rcu, vlan_info_rcu_free); } } } EXPORT_SYMBOL(vlan_vid_del); int vlan_vids_add_by_dev(struct net_device *dev, const struct net_device *by_dev) { struct vlan_vid_info *vid_info; struct vlan_info *vlan_info; int err; ASSERT_RTNL(); vlan_info = rtnl_dereference(by_dev->vlan_info); if (!vlan_info) return 0; list_for_each_entry(vid_info, &vlan_info->vid_list, list) { if (!vlan_hw_filter_capable(by_dev, vid_info->proto)) continue; err = vlan_vid_add(dev, vid_info->proto, vid_info->vid); if (err) goto unwind; } return 0; unwind: list_for_each_entry_continue_reverse(vid_info, &vlan_info->vid_list, list) { if (!vlan_hw_filter_capable(by_dev, vid_info->proto)) continue; vlan_vid_del(dev, vid_info->proto, vid_info->vid); } return err; } EXPORT_SYMBOL(vlan_vids_add_by_dev); void vlan_vids_del_by_dev(struct net_device *dev, const struct net_device *by_dev) { struct vlan_vid_info *vid_info; struct vlan_info *vlan_info; ASSERT_RTNL(); vlan_info = rtnl_dereference(by_dev->vlan_info); if (!vlan_info) return; list_for_each_entry(vid_info, &vlan_info->vid_list, list) { if (!vlan_hw_filter_capable(by_dev, vid_info->proto)) continue; vlan_vid_del(dev, vid_info->proto, vid_info->vid); } } EXPORT_SYMBOL(vlan_vids_del_by_dev); bool vlan_uses_dev(const struct net_device *dev) { struct vlan_info *vlan_info; ASSERT_RTNL(); vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) return false; return vlan_info->grp.nr_vlan_devs ? 
true : false; } EXPORT_SYMBOL(vlan_uses_dev); static struct sk_buff *vlan_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct packet_offload *ptype; unsigned int hlen, off_vlan; struct sk_buff *pp = NULL; struct vlan_hdr *vhdr; struct sk_buff *p; __be16 type; int flush = 1; off_vlan = skb_gro_offset(skb); hlen = off_vlan + sizeof(*vhdr); vhdr = skb_gro_header(skb, hlen, off_vlan); if (unlikely(!vhdr)) goto out; NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen; type = vhdr->h_vlan_encapsulated_proto; ptype = gro_find_receive_by_type(type); if (!ptype) goto out; flush = 0; list_for_each_entry(p, head, list) { struct vlan_hdr *vhdr2; if (!NAPI_GRO_CB(p)->same_flow) continue; vhdr2 = (struct vlan_hdr *)(p->data + off_vlan); if (compare_vlan_header(vhdr, vhdr2)) NAPI_GRO_CB(p)->same_flow = 0; } skb_gro_pull(skb, sizeof(*vhdr)); skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive, ipv6_gro_receive, inet_gro_receive, head, skb); out: skb_gro_flush_final(skb, pp, flush); return pp; } static int vlan_gro_complete(struct sk_buff *skb, int nhoff) { struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff); __be16 type = vhdr->h_vlan_encapsulated_proto; struct packet_offload *ptype; int err = -ENOENT; ptype = gro_find_complete_by_type(type); if (ptype) err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, ipv6_gro_complete, inet_gro_complete, skb, nhoff + sizeof(*vhdr)); return err; } static struct packet_offload vlan_packet_offloads[] __read_mostly = { { .type = cpu_to_be16(ETH_P_8021Q), .priority = 10, .callbacks = { .gro_receive = vlan_gro_receive, .gro_complete = vlan_gro_complete, }, }, { .type = cpu_to_be16(ETH_P_8021AD), .priority = 10, .callbacks = { .gro_receive = vlan_gro_receive, .gro_complete = vlan_gro_complete, }, }, }; static int __init vlan_offload_init(void) { unsigned int i; for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++) dev_add_offload(&vlan_packet_offloads[i]); return 0; } fs_initcall(vlan_offload_init);
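/*
 * [Editor's illustrative sketch -- not kernel code; all names below are
 * invented.] The refcounting contract of vlan_vid_add()/vlan_vid_del()
 * above: the first add of a (proto, vid) pair allocates the entry and
 * programs the hardware filter (ndo_vlan_rx_add_vid in the real code),
 * later adds only bump a refcount, and the filter is dropped only when the
 * last reference goes away (ndo_vlan_rx_kill_vid). A hypothetical
 * standalone model of that contract:
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_vid_info {
	struct demo_vid_info *next;
	unsigned short proto;
	unsigned short vid;
	int refcount;
};

static struct demo_vid_info *vid_list;

static struct demo_vid_info *demo_vid_get(unsigned short proto,
					  unsigned short vid)
{
	struct demo_vid_info *v;

	for (v = vid_list; v; v = v->next)
		if (v->proto == proto && v->vid == vid)
			return v;
	return NULL;
}

static int demo_vid_add(unsigned short proto, unsigned short vid)
{
	struct demo_vid_info *v = demo_vid_get(proto, vid);

	if (!v) {
		v = calloc(1, sizeof(*v));
		if (!v)
			return -1;
		v->proto = proto;
		v->vid = vid;
		v->next = vid_list;
		vid_list = v;
		/* first reference: program the hardware filter */
		printf("program HW filter for vid %u\n", vid);
	}
	v->refcount++;
	return 0;
}

static void demo_vid_del(unsigned short proto, unsigned short vid)
{
	struct demo_vid_info **pv = &vid_list, *v = demo_vid_get(proto, vid);

	if (!v || --v->refcount)
		return;		/* still referenced elsewhere */
	while (*pv != v)	/* unlink and drop the filter on last put */
		pv = &(*pv)->next;
	*pv = v->next;
	printf("drop HW filter for vid %u\n", vid);
	free(v);
}

int main(void)
{
	demo_vid_add(0x8100, 100);	/* programs the filter */
	demo_vid_add(0x8100, 100);	/* refcount only */
	demo_vid_del(0x8100, 100);	/* still referenced */
	demo_vid_del(0x8100, 100);	/* last ref: filter dropped */
	return 0;
}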
// SPDX-License-Identifier: GPL-2.0-only #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/delay.h> #include <linux/hpet.h> #include <linux/cpu.h> #include <linux/irq.h> #include <asm/cpuid/api.h> #include <asm/irq_remapping.h> #include <asm/hpet.h> #include <asm/time.h> #include <asm/mwait.h> #include <asm/msr.h> #undef pr_fmt #define pr_fmt(fmt) "hpet: " fmt enum hpet_mode { HPET_MODE_UNUSED, HPET_MODE_LEGACY, HPET_MODE_CLOCKEVT, HPET_MODE_DEVICE, }; struct hpet_channel { struct clock_event_device evt; unsigned int num; unsigned int cpu; unsigned int irq; unsigned int in_use; enum hpet_mode mode; unsigned int boot_cfg; char name[10]; }; struct hpet_base { unsigned int nr_channels; unsigned int nr_clockevents; unsigned int boot_cfg; struct hpet_channel *channels; }; #define HPET_MASK CLOCKSOURCE_MASK(32) #define HPET_MIN_CYCLES 128 #define
HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) /* * HPET address is set in acpi/boot.c, when an ACPI entry exists */ unsigned long hpet_address; u8 hpet_blockid; /* OS timer block num */ bool hpet_msi_disable; #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ) static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel); static struct irq_domain *hpet_domain; #endif static void __iomem *hpet_virt_address; static struct hpet_base hpet_base; static bool hpet_legacy_int_enabled; static unsigned long hpet_freq; bool boot_hpet_disable; bool hpet_force_user; static bool hpet_verbose; static inline struct hpet_channel *clockevent_to_channel(struct clock_event_device *evt) { return container_of(evt, struct hpet_channel, evt); } inline unsigned int hpet_readl(unsigned int a) { return readl(hpet_virt_address + a); } static inline void hpet_writel(unsigned int d, unsigned int a) { writel(d, hpet_virt_address + a); } static inline void hpet_set_mapping(void) { hpet_virt_address = ioremap(hpet_address, HPET_MMAP_SIZE); } static inline void hpet_clear_mapping(void) { iounmap(hpet_virt_address); hpet_virt_address = NULL; } /* * HPET command line enable / disable */ static int __init hpet_setup(char *str) { while (str) { char *next = strchr(str, ','); if (next) *next++ = 0; if (!strncmp("disable", str, 7)) boot_hpet_disable = true; if (!strncmp("force", str, 5)) hpet_force_user = true; if (!strncmp("verbose", str, 7)) hpet_verbose = true; str = next; } return 1; } __setup("hpet=", hpet_setup); static int __init disable_hpet(char *str) { boot_hpet_disable = true; return 1; } __setup("nohpet", disable_hpet); static inline int is_hpet_capable(void) { return !boot_hpet_disable && hpet_address; } /** * is_hpet_enabled - Check whether the legacy HPET timer interrupt is enabled */ int is_hpet_enabled(void) { return is_hpet_capable() && hpet_legacy_int_enabled; } EXPORT_SYMBOL_GPL(is_hpet_enabled); static void _hpet_print_config(const char *function, int line) { u32 i, id, period, cfg, status, channels, l, h; pr_info("%s(%d):\n", function, line); id = hpet_readl(HPET_ID); period = hpet_readl(HPET_PERIOD); pr_info("ID: 0x%x, PERIOD: 0x%x\n", id, period); cfg = hpet_readl(HPET_CFG); status = hpet_readl(HPET_STATUS); pr_info("CFG: 0x%x, STATUS: 0x%x\n", cfg, status); l = hpet_readl(HPET_COUNTER); h = hpet_readl(HPET_COUNTER+4); pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h); channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; for (i = 0; i < channels; i++) { l = hpet_readl(HPET_Tn_CFG(i)); h = hpet_readl(HPET_Tn_CFG(i)+4); pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h); l = hpet_readl(HPET_Tn_CMP(i)); h = hpet_readl(HPET_Tn_CMP(i)+4); pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h); l = hpet_readl(HPET_Tn_ROUTE(i)); h = hpet_readl(HPET_Tn_ROUTE(i)+4); pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h); } } #define hpet_print_config() \ do { \ if (hpet_verbose) \ _hpet_print_config(__func__, __LINE__); \ } while (0) /* * When the HPET driver (/dev/hpet) is enabled, we need to reserve * timer 0 and timer 1 in case of RTC emulation. */ #ifdef CONFIG_HPET static void __init hpet_reserve_platform_timers(void) { struct hpet_data hd; unsigned int i; memset(&hd, 0, sizeof(hd)); hd.hd_phys_address = hpet_address; hd.hd_address = hpet_virt_address; hd.hd_nirqs = hpet_base.nr_channels; /* * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254 * is wrong for i8259!) not the output IRQ. 
Many BIOS writers * don't bother configuring *any* comparator interrupts. */ hd.hd_irq[0] = HPET_LEGACY_8254; hd.hd_irq[1] = HPET_LEGACY_RTC; for (i = 0; i < hpet_base.nr_channels; i++) { struct hpet_channel *hc = hpet_base.channels + i; if (i >= 2) hd.hd_irq[i] = hc->irq; switch (hc->mode) { case HPET_MODE_UNUSED: case HPET_MODE_DEVICE: hc->mode = HPET_MODE_DEVICE; break; case HPET_MODE_CLOCKEVT: case HPET_MODE_LEGACY: hpet_reserve_timer(&hd, hc->num); break; } } hpet_alloc(&hd); } static void __init hpet_select_device_channel(void) { int i; for (i = 0; i < hpet_base.nr_channels; i++) { struct hpet_channel *hc = hpet_base.channels + i; /* Associate the first unused channel to /dev/hpet */ if (hc->mode == HPET_MODE_UNUSED) { hc->mode = HPET_MODE_DEVICE; return; } } } #else static inline void hpet_reserve_platform_timers(void) { } static inline void hpet_select_device_channel(void) {} #endif /* Common HPET functions */ static void hpet_stop_counter(void) { u32 cfg = hpet_readl(HPET_CFG); cfg &= ~HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); } static void hpet_reset_counter(void) { hpet_writel(0, HPET_COUNTER); hpet_writel(0, HPET_COUNTER + 4); } static void hpet_start_counter(void) { unsigned int cfg = hpet_readl(HPET_CFG); cfg |= HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); } static void hpet_restart_counter(void) { hpet_stop_counter(); hpet_reset_counter(); hpet_start_counter(); } static void hpet_resume_device(void) { force_hpet_resume(); } static void hpet_resume_counter(struct clocksource *cs) { hpet_resume_device(); hpet_restart_counter(); } static void hpet_enable_legacy_int(void) { unsigned int cfg = hpet_readl(HPET_CFG); cfg |= HPET_CFG_LEGACY; hpet_writel(cfg, HPET_CFG); hpet_legacy_int_enabled = true; } static int hpet_clkevt_set_state_periodic(struct clock_event_device *evt) { unsigned int channel = clockevent_to_channel(evt)->num; unsigned int cfg, cmp, now; uint64_t delta; hpet_stop_counter(); delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult; delta >>= evt->shift; now = hpet_readl(HPET_COUNTER); cmp = now + (unsigned int)delta; cfg = hpet_readl(HPET_Tn_CFG(channel)); cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | HPET_TN_32BIT; hpet_writel(cfg, HPET_Tn_CFG(channel)); hpet_writel(cmp, HPET_Tn_CMP(channel)); udelay(1); /* * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL * bit is automatically cleared after the first write. 
* (See AMD-8111 HyperTransport I/O Hub Data Sheet, * Publication # 24674) */ hpet_writel((unsigned int)delta, HPET_Tn_CMP(channel)); hpet_start_counter(); hpet_print_config(); return 0; } static int hpet_clkevt_set_state_oneshot(struct clock_event_device *evt) { unsigned int channel = clockevent_to_channel(evt)->num; unsigned int cfg; cfg = hpet_readl(HPET_Tn_CFG(channel)); cfg &= ~HPET_TN_PERIODIC; cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; hpet_writel(cfg, HPET_Tn_CFG(channel)); return 0; } static int hpet_clkevt_set_state_shutdown(struct clock_event_device *evt) { unsigned int channel = clockevent_to_channel(evt)->num; unsigned int cfg; cfg = hpet_readl(HPET_Tn_CFG(channel)); cfg &= ~HPET_TN_ENABLE; hpet_writel(cfg, HPET_Tn_CFG(channel)); return 0; } static int hpet_clkevt_legacy_resume(struct clock_event_device *evt) { hpet_enable_legacy_int(); hpet_print_config(); return 0; } static int hpet_clkevt_set_next_event(unsigned long delta, struct clock_event_device *evt) { unsigned int channel = clockevent_to_channel(evt)->num; u32 cnt; s32 res; cnt = hpet_readl(HPET_COUNTER); cnt += (u32) delta; hpet_writel(cnt, HPET_Tn_CMP(channel)); /* * HPETs are a complete disaster. The compare register is * based on an equal comparison and neither provides a less * than or equal functionality (which would require taking * the wraparound into account) nor a simple count down event * mode. Further the write to the comparator register is * delayed internally up to two HPET clock cycles in certain * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even * longer delays. We worked around that by reading back the * compare register, but that required another workaround for * ICH9,10 chips where the first readout after write can * return the old stale value. We already had a minimum * programming delta of 5us enforced, but an NMI or SMI hitting * between the counter readout and the comparator write can * move us behind that point easily. Now instead of reading * the compare register back several times, we make the ETIME * decision based on the following: Return ETIME if the * counter value after the write is less than HPET_MIN_CYCLES * away from the event or if the counter is already ahead of * the event. The minimum programming delta for the generic * clockevents code is set to 1.5 * HPET_MIN_CYCLES. */ res = (s32)(cnt - hpet_readl(HPET_COUNTER)); return res < HPET_MIN_CYCLES ? -ETIME : 0; } static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating) { struct clock_event_device *evt = &hc->evt; evt->rating = rating; evt->irq = hc->irq; evt->name = hc->name; evt->cpumask = cpumask_of(hc->cpu); evt->set_state_oneshot = hpet_clkevt_set_state_oneshot; evt->set_next_event = hpet_clkevt_set_next_event; evt->set_state_shutdown = hpet_clkevt_set_state_shutdown; evt->features = CLOCK_EVT_FEAT_ONESHOT; if (hc->boot_cfg & HPET_TN_PERIODIC) { evt->features |= CLOCK_EVT_FEAT_PERIODIC; evt->set_state_periodic = hpet_clkevt_set_state_periodic; } } static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc) { /* * Start HPET with the boot CPU's cpumask and make it global after * the IO_APIC has been initialized. */ hc->cpu = boot_cpu_data.cpu_index; strscpy(hc->name, "hpet", sizeof(hc->name)); hpet_init_clockevent(hc, 50); hc->evt.tick_resume = hpet_clkevt_legacy_resume; /* * Legacy horrors and sins from the past. HPET used periodic mode * unconditionally forever on the legacy channel 0.
Removing the * below hack and using the conditional in hpet_init_clockevent() * makes at least Qemu and one hardware machine fail to boot. * There are two issues which cause the boot failure: * * #1 After the timer delivery test in IOAPIC and the IOAPIC setup * the next interrupt is not delivered despite the HPET channel * being programmed correctly. Reprogramming the HPET after * switching to IOAPIC makes it work again. After fixing this, * the next issue surfaces: * * #2 Due to the unconditional periodic mode availability the Local * APIC timer calibration can hijack the global clockevents * event handler without causing damage. Using oneshot at this * stage makes it hang because the HPET does not get * reprogrammed due to the handler hijacking. Duh, stupid me! * * Both issues require major surgery and especially kicking the HPET * again after enabling the IOAPIC results in really nasty hackery. * This 'assume periodic works' magic has survived since HPET * support got added, so it's questionable whether this should be * fixed. Both Qemu and the failing hardware machine support * periodic mode despite the fact that both don't advertise it in * the configuration register and both need that extra kick after * switching to IOAPIC. Seems to be a feature... */ hc->evt.features |= CLOCK_EVT_FEAT_PERIODIC; hc->evt.set_state_periodic = hpet_clkevt_set_state_periodic; /* Start HPET legacy interrupts */ hpet_enable_legacy_int(); clockevents_config_and_register(&hc->evt, hpet_freq, HPET_MIN_PROG_DELTA, 0x7FFFFFFF); global_clock_event = &hc->evt; pr_debug("Clockevent registered\n"); } /* * HPET MSI Support */ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ) static void hpet_msi_unmask(struct irq_data *data) { struct hpet_channel *hc = irq_data_get_irq_handler_data(data); unsigned int cfg; cfg = hpet_readl(HPET_Tn_CFG(hc->num)); cfg |= HPET_TN_ENABLE | HPET_TN_FSB; hpet_writel(cfg, HPET_Tn_CFG(hc->num)); } static void hpet_msi_mask(struct irq_data *data) { struct hpet_channel *hc = irq_data_get_irq_handler_data(data); unsigned int cfg; cfg = hpet_readl(HPET_Tn_CFG(hc->num)); cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB); hpet_writel(cfg, HPET_Tn_CFG(hc->num)); } static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg) { hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num)); hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4); } static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg) { hpet_msi_write(irq_data_get_irq_handler_data(data), msg); } static struct irq_chip hpet_msi_controller __ro_after_init = { .name = "HPET-MSI", .irq_unmask = hpet_msi_unmask, .irq_mask = hpet_msi_mask, .irq_ack = irq_chip_ack_parent, .irq_set_affinity = msi_domain_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_write_msi_msg = hpet_msi_write_msg, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, }; static int hpet_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL, handle_edge_irq, arg->data, "edge"); return 0; } static struct msi_domain_ops hpet_msi_domain_ops = { .msi_init = hpet_msi_init, }; static struct msi_domain_info hpet_msi_domain_info = { .ops = &hpet_msi_domain_ops, .chip = &hpet_msi_controller, .flags = MSI_FLAG_USE_DEF_DOM_OPS, }; static struct irq_domain *hpet_create_irq_domain(int hpet_id) { struct msi_domain_info *domain_info; struct irq_domain *parent, *d; struct fwnode_handle *fn; struct
irq_fwspec fwspec; if (x86_vector_domain == NULL) return NULL; domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL); if (!domain_info) return NULL; *domain_info = hpet_msi_domain_info; domain_info->data = (void *)(long)hpet_id; fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name, hpet_id); if (!fn) { kfree(domain_info); return NULL; } fwspec.fwnode = fn; fwspec.param_count = 1; fwspec.param[0] = hpet_id; parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_GENERIC_MSI); if (!parent) { irq_domain_free_fwnode(fn); kfree(domain_info); return NULL; } if (parent != x86_vector_domain) hpet_msi_controller.name = "IR-HPET-MSI"; d = msi_create_irq_domain(fn, domain_info, parent); if (!d) { irq_domain_free_fwnode(fn); kfree(domain_info); } return d; } static inline int hpet_dev_id(struct irq_domain *domain) { struct msi_domain_info *info = msi_get_domain_info(domain); return (int)(long)info->data; } static int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc, int dev_num) { struct irq_alloc_info info; init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_HPET; info.data = hc; info.devid = hpet_dev_id(domain); info.hwirq = dev_num; return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); } static int hpet_clkevt_msi_resume(struct clock_event_device *evt) { struct hpet_channel *hc = clockevent_to_channel(evt); struct irq_data *data = irq_get_irq_data(hc->irq); struct msi_msg msg; /* Restore the MSI msg and unmask the interrupt */ irq_chip_compose_msi_msg(data, &msg); hpet_msi_write(hc, &msg); hpet_msi_unmask(data); return 0; } static irqreturn_t hpet_msi_interrupt_handler(int irq, void *data) { struct hpet_channel *hc = data; struct clock_event_device *evt = &hc->evt; if (!evt->event_handler) { pr_info("Spurious interrupt HPET channel %d\n", hc->num); return IRQ_HANDLED; } evt->event_handler(evt); return IRQ_HANDLED; } static int hpet_setup_msi_irq(struct hpet_channel *hc) { if (request_irq(hc->irq, hpet_msi_interrupt_handler, IRQF_TIMER | IRQF_NOBALANCING, hc->name, hc)) return -1; disable_irq(hc->irq); irq_set_affinity(hc->irq, cpumask_of(hc->cpu)); enable_irq(hc->irq); pr_debug("%s irq %u for MSI\n", hc->name, hc->irq); return 0; } /* Invoked from the hotplug callback on @cpu */ static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu) { struct clock_event_device *evt = &hc->evt; hc->cpu = cpu; per_cpu(cpu_hpet_channel, cpu) = hc; hpet_setup_msi_irq(hc); hpet_init_clockevent(hc, 110); evt->tick_resume = hpet_clkevt_msi_resume; clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA, 0x7FFFFFFF); } static struct hpet_channel *hpet_get_unused_clockevent(void) { int i; for (i = 0; i < hpet_base.nr_channels; i++) { struct hpet_channel *hc = hpet_base.channels + i; if (hc->mode != HPET_MODE_CLOCKEVT || hc->in_use) continue; hc->in_use = 1; return hc; } return NULL; } static int hpet_cpuhp_online(unsigned int cpu) { struct hpet_channel *hc = hpet_get_unused_clockevent(); if (hc) init_one_hpet_msi_clockevent(hc, cpu); return 0; } static int hpet_cpuhp_dead(unsigned int cpu) { struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu); if (!hc) return 0; free_irq(hc->irq, hc); hc->in_use = 0; per_cpu(cpu_hpet_channel, cpu) = NULL; return 0; } static void __init hpet_select_clockevents(void) { unsigned int i; hpet_base.nr_clockevents = 0; /* No point if MSI is disabled or CPU has an Always Running APIC Timer */ if (hpet_msi_disable || boot_cpu_has(X86_FEATURE_ARAT)) return; hpet_print_config(); hpet_domain = 
hpet_create_irq_domain(hpet_blockid); if (!hpet_domain) return; for (i = 0; i < hpet_base.nr_channels; i++) { struct hpet_channel *hc = hpet_base.channels + i; int irq; if (hc->mode != HPET_MODE_UNUSED) continue; /* Only consider HPET channels with MSI support */ if (!(hc->boot_cfg & HPET_TN_FSB_CAP)) continue; sprintf(hc->name, "hpet%d", i); irq = hpet_assign_irq(hpet_domain, hc, hc->num); if (irq <= 0) continue; hc->irq = irq; hc->mode = HPET_MODE_CLOCKEVT; if (++hpet_base.nr_clockevents == num_possible_cpus()) break; } pr_info("%d channels of %d reserved for per-cpu timers\n", hpet_base.nr_channels, hpet_base.nr_clockevents); } #else static inline void hpet_select_clockevents(void) { } #define hpet_cpuhp_online NULL #define hpet_cpuhp_dead NULL #endif /* * Clock source related code */ #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) /* * Reading the HPET counter is a very slow operation. If a large number of * CPUs are trying to access the HPET counter simultaneously, it can cause * massive delays and slow down system performance dramatically. This may * happen when HPET is the default clock source instead of TSC. For a * really large system with hundreds of CPUs, the slowdown may be so * severe that it can actually crash the system because of an NMI watchdog * soft lockup, for example. * * If multiple CPUs are trying to access the HPET counter at the same time, * we don't actually need to read the counter multiple times. Instead, the * other CPUs can use the counter value read by the first CPU in the group. * * This special feature is only enabled on x86-64 systems. It is unlikely * that 32-bit x86 systems will have enough CPUs to require this feature * with its associated locking overhead. We also need 64-bit atomic read. * * The lock and the HPET value are stored together and can be read in a * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t * is 32 bits in size. */ union hpet_lock { struct { arch_spinlock_t lock; u32 value; }; u64 lockval; }; static union hpet_lock hpet __cacheline_aligned = { { .lock = __ARCH_SPIN_LOCK_UNLOCKED, }, }; static u64 read_hpet(struct clocksource *cs) { unsigned long flags; union hpet_lock old, new; BUILD_BUG_ON(sizeof(union hpet_lock) != 8); /* * Read HPET directly if in NMI. */ if (in_nmi()) return (u64)hpet_readl(HPET_COUNTER); /* * Read the current state of the lock and HPET value atomically. */ old.lockval = READ_ONCE(hpet.lockval); if (arch_spin_is_locked(&old.lock)) goto contended; local_irq_save(flags); if (arch_spin_trylock(&hpet.lock)) { new.value = hpet_readl(HPET_COUNTER); /* * Use WRITE_ONCE() to prevent store tearing. */ WRITE_ONCE(hpet.value, new.value); arch_spin_unlock(&hpet.lock); local_irq_restore(flags); return (u64)new.value; } local_irq_restore(flags); contended: /* * Contended case * -------------- * Wait until the HPET value changes or the lock is free to indicate * its value is up-to-date. * * It is possible that old.value has already contained the latest * HPET value while the lock holder was in the process of releasing * the lock. Checking for lock state change will enable us to return * the value immediately instead of waiting for the next HPET reader * to come along. */ do { cpu_relax(); new.lockval = READ_ONCE(hpet.lockval); } while ((new.value == old.value) && arch_spin_is_locked(&new.lock)); return (u64)new.value; } #else /* * For UP or 32-bit.
*/ static u64 read_hpet(struct clocksource *cs) { return (u64)hpet_readl(HPET_COUNTER); } #endif static struct clocksource clocksource_hpet = { .name = "hpet", .rating = 250, .read = read_hpet, .mask = HPET_MASK, .flags = CLOCK_SOURCE_IS_CONTINUOUS, .resume = hpet_resume_counter, }; /* * AMD SB700 based systems with spread spectrum enabled use an SMM based * HPET emulation to provide proper frequency setting. * * On such systems the SMM code is initialized with the first HPET register * access and takes some time to complete. During this time the config * register reads 0xffffffff. We check for max 1000 loops whether the * config register reads a non-0xffffffff value to make sure that the * HPET is up and running before we proceed any further. * * A counting loop is safe, as the HPET access takes thousands of CPU cycles. * * On non-SB700 based machines this check is only done once and has no * side effects. */ static bool __init hpet_cfg_working(void) { int i; for (i = 0; i < 1000; i++) { if (hpet_readl(HPET_CFG) != 0xFFFFFFFF) return true; } pr_warn("Config register invalid. Disabling HPET\n"); return false; } static bool __init hpet_counting(void) { u64 start, now, t1; hpet_restart_counter(); t1 = hpet_readl(HPET_COUNTER); start = rdtsc(); /* * We don't know the TSC frequency yet, but waiting for * 200000 TSC cycles is safe: * 4 GHz == 50us * 1 GHz == 200us */ do { if (t1 != hpet_readl(HPET_COUNTER)) return true; now = rdtsc(); } while ((now - start) < 200000UL); pr_warn("Counter not counting. HPET disabled\n"); return false; } static bool __init mwait_pc10_supported(void) { unsigned int eax, ebx, ecx, mwait_substates; if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; if (!cpu_feature_enabled(X86_FEATURE_MWAIT)) return false; cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates); return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) && (ecx & CPUID5_ECX_INTERRUPT_BREAK) && (mwait_substates & (0xF << 28)); } /* * Check whether the system supports PC10. If so force disable HPET as that * stops counting in PC10. This check is overbroad as it does not take any * of the following into account: * * - ACPI tables * - Enablement of intel_idle * - Command line arguments which limit intel_idle C-state support * * That's perfectly fine. HPET is a piece of hardware designed by committee * and the only reason why it is still in use on modern systems is the * fact that it is impossible to reliably query TSC and CPU frequency via * CPUID or firmware. * * If HPET is functional it is useful for calibrating TSC, but this can be * done via PMTIMER as well which seems to be the last remaining timer on * X86/INTEL platforms that has not been completely wrecked by feature * creep. * * In theory HPET support should be removed altogether, but there are older * systems out there which depend on it because TSC and APIC timer are * dysfunctional in deeper C-states. * * It's only 20 years now that hardware people have been asked to provide * reliable and discoverable facilities which can be used for timekeeping * and per CPU timer interrupts. * * The probability that this problem is going to be solved in the * foreseeable future is close to zero, so the kernel has to be cluttered * with heuristics to keep up with the ever growing amount of hardware and * firmware trainwrecks. Hopefully some day hardware people will understand * that the approach of "This can be fixed in software" is not sustainable. * Hope dies last...
*/ static bool __init hpet_is_pc10_damaged(void) { unsigned long long pcfg; /* Check whether PC10 substates are supported */ if (!mwait_pc10_supported()) return false; /* Check whether PC10 is enabled in the PKG C-state limit */ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, pcfg); if ((pcfg & 0xF) < 8) return false; if (hpet_force_user) { pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n"); return false; } pr_info("HPET dysfunctional in PC10. Force disabled.\n"); boot_hpet_disable = true; return true; } /** * hpet_enable - Try to set up the HPET timer. Returns 1 on success. */ int __init hpet_enable(void) { u32 hpet_period, cfg, id, irq; unsigned int i, channels; struct hpet_channel *hc; u64 freq; if (!is_hpet_capable()) return 0; if (hpet_is_pc10_damaged()) return 0; hpet_set_mapping(); if (!hpet_virt_address) return 0; /* Validate that the config register is working */ if (!hpet_cfg_working()) goto out_nohpet; /* * Read the period and check for a sane value: */ hpet_period = hpet_readl(HPET_PERIOD); if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) goto out_nohpet; /* The period is a femtoseconds value. Convert it to a frequency. */ freq = FSEC_PER_SEC; do_div(freq, hpet_period); hpet_freq = freq; /* * Read the HPET ID register to retrieve the IRQ routing * information and the number of channels */ id = hpet_readl(HPET_ID); hpet_print_config(); /* The HPET_ID_NUMBER field is zero based, so add 1 to get the channel count */ channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; /* * The legacy routing mode needs at least two channels: the tick timer * and the RTC emulation channel. */ if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC) && channels < 2) goto out_nohpet; hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL); if (!hc) { pr_warn("Disabling HPET.\n"); goto out_nohpet; } hpet_base.channels = hc; hpet_base.nr_channels = channels; /* Read, store and sanitize the global configuration */ cfg = hpet_readl(HPET_CFG); hpet_base.boot_cfg = cfg; cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY); hpet_writel(cfg, HPET_CFG); if (cfg) pr_warn("Global config: Unknown bits %#x\n", cfg); /* Read, store and sanitize the per channel configuration */ for (i = 0; i < channels; i++, hc++) { hc->num = i; cfg = hpet_readl(HPET_Tn_CFG(i)); hc->boot_cfg = cfg; irq = (cfg & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT; hc->irq = irq; cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB); hpet_writel(cfg, HPET_Tn_CFG(i)); cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE | HPET_TN_FSB | HPET_TN_FSB_CAP); if (cfg) pr_warn("Channel #%u config: Unknown bits %#x\n", i, cfg); } hpet_print_config(); /* * Validate that the counter is counting. This needs to be done * after sanitizing the config registers to properly deal with * force enabled HPETs. */ if (!hpet_counting()) goto out_nohpet; if (tsc_clocksource_watchdog_disabled()) clocksource_hpet.flags |= CLOCK_SOURCE_MUST_VERIFY; clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq); if (id & HPET_ID_LEGSUP) { hpet_legacy_clockevent_register(&hpet_base.channels[0]); hpet_base.channels[0].mode = HPET_MODE_LEGACY; if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC)) hpet_base.channels[1].mode = HPET_MODE_LEGACY; return 1; } return 0; out_nohpet: kfree(hpet_base.channels); hpet_base.channels = NULL; hpet_base.nr_channels = 0; hpet_clear_mapping(); hpet_address = 0; return 0; } /* * The late initialization runs after the PCI quirks have been invoked, * which might have detected a system on which the HPET can be enforced.
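* Such quirks set force_hpet_address, which is picked up below.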
* * Also, the MSI machinery is not working yet when the HPET is initialized * early. * * If the HPET is enabled, then: * * 1) Reserve one channel for /dev/hpet if CONFIG_HPET=y * 2) Reserve up to num_possible_cpus() channels as per-CPU clockevents * 3) Set up /dev/hpet if CONFIG_HPET=y * 4) Register hotplug callbacks when clockevents are available */ static __init int hpet_late_init(void) { int ret; if (!hpet_address) { if (!force_hpet_address) return -ENODEV; hpet_address = force_hpet_address; hpet_enable(); } if (!hpet_virt_address) return -ENODEV; hpet_select_device_channel(); hpet_select_clockevents(); hpet_reserve_platform_timers(); hpet_print_config(); if (!hpet_base.nr_clockevents) return 0; ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online", hpet_cpuhp_online, NULL); if (ret) return ret; ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL, hpet_cpuhp_dead); if (ret) goto err_cpuhp; return 0; err_cpuhp: cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE); return ret; } fs_initcall(hpet_late_init); void hpet_disable(void) { unsigned int i; u32 cfg; if (!is_hpet_capable() || !hpet_virt_address) return; /* Restore the boot configuration with the enable bit cleared */ cfg = hpet_base.boot_cfg; cfg &= ~HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); /* Restore the channel boot configuration */ for (i = 0; i < hpet_base.nr_channels; i++) hpet_writel(hpet_base.channels[i].boot_cfg, HPET_Tn_CFG(i)); /* If the HPET was enabled at boot time, re-enable it */ if (hpet_base.boot_cfg & HPET_CFG_ENABLE) hpet_writel(hpet_base.boot_cfg, HPET_CFG); } #ifdef CONFIG_HPET_EMULATE_RTC /* * HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET * is enabled, we support RTC interrupt functionality in software. * * The RTC has 3 kinds of interrupts: * * 1) Update Interrupt - generates an interrupt every second, when the * RTC clock is updated * 2) Alarm Interrupt - generates an interrupt at a specific time of day * 3) Periodic Interrupt - generates periodic interrupts at frequencies of * 2Hz-8192Hz (2Hz-64Hz for non-root users); all frequencies are powers of 2 * * (1) and (2) above are implemented using polling at a frequency of 64 Hz: * DEFAULT_RTC_INT_FREQ. * * The exact frequency is a tradeoff between accuracy and interrupt overhead. * * For (3), we use interrupts at 64 Hz, or the user-specified periodic * frequency, if it's higher. */ #include <linux/mc146818rtc.h> #include <linux/rtc.h> #define DEFAULT_RTC_INT_FREQ 64 #define DEFAULT_RTC_SHIFT 6 #define RTC_NUM_INTS 1 static unsigned long hpet_rtc_flags; static int hpet_prev_update_sec; static struct rtc_time hpet_alarm_time; static unsigned long hpet_pie_count; static u32 hpet_t1_cmp; static u32 hpet_default_delta; static u32 hpet_pie_delta; static unsigned long hpet_pie_limit; static rtc_irq_handler irq_handler; /* * Check that the HPET counter c1 is ahead of c2 */ static inline int hpet_cnt_ahead(u32 c1, u32 c2) { return (s32)(c2 - c1) < 0; } /* * Registers an IRQ handler. */ int hpet_register_irq_handler(rtc_irq_handler handler) { if (!is_hpet_enabled()) return -ENODEV; if (irq_handler) return -EBUSY; irq_handler = handler; return 0; } EXPORT_SYMBOL_GPL(hpet_register_irq_handler); /* * Deregisters the IRQ handler registered with hpet_register_irq_handler() * and does cleanup. */ void hpet_unregister_irq_handler(rtc_irq_handler handler) { if (!is_hpet_enabled()) return; irq_handler = NULL; hpet_rtc_flags = 0; } EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler); /* * Channel 1 is used for RTC emulation.
We use one shot mode, as periodic mode * is not supported by all HPET implementations for channel 1. * * hpet_rtc_timer_init() is called when the rtc is initialized. */ int hpet_rtc_timer_init(void) { unsigned int cfg, cnt, delta; unsigned long flags; if (!is_hpet_enabled()) return 0; if (!hpet_default_delta) { struct clock_event_device *evt = &hpet_base.channels[0].evt; uint64_t clc; clc = (uint64_t) evt->mult * NSEC_PER_SEC; clc >>= evt->shift + DEFAULT_RTC_SHIFT; hpet_default_delta = clc; } if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) delta = hpet_default_delta; else delta = hpet_pie_delta; local_irq_save(flags); cnt = delta + hpet_readl(HPET_COUNTER); hpet_writel(cnt, HPET_T1_CMP); hpet_t1_cmp = cnt; cfg = hpet_readl(HPET_T1_CFG); cfg &= ~HPET_TN_PERIODIC; cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; hpet_writel(cfg, HPET_T1_CFG); local_irq_restore(flags); return 1; } EXPORT_SYMBOL_GPL(hpet_rtc_timer_init); static void hpet_disable_rtc_channel(void) { u32 cfg = hpet_readl(HPET_T1_CFG); cfg &= ~HPET_TN_ENABLE; hpet_writel(cfg, HPET_T1_CFG); } /* * The functions below are called from rtc driver. * Return 0 if HPET is not being used. * Otherwise do the necessary changes and return 1. */ int hpet_mask_rtc_irq_bit(unsigned long bit_mask) { if (!is_hpet_enabled()) return 0; hpet_rtc_flags &= ~bit_mask; if (unlikely(!hpet_rtc_flags)) hpet_disable_rtc_channel(); return 1; } EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); int hpet_set_rtc_irq_bit(unsigned long bit_mask) { unsigned long oldbits = hpet_rtc_flags; if (!is_hpet_enabled()) return 0; hpet_rtc_flags |= bit_mask; if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE)) hpet_prev_update_sec = -1; if (!oldbits) hpet_rtc_timer_init(); return 1; } EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit); int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec) { if (!is_hpet_enabled()) return 0; hpet_alarm_time.tm_hour = hrs; hpet_alarm_time.tm_min = min; hpet_alarm_time.tm_sec = sec; return 1; } EXPORT_SYMBOL_GPL(hpet_set_alarm_time); int hpet_set_periodic_freq(unsigned long freq) { uint64_t clc; if (!is_hpet_enabled()) return 0; if (freq <= DEFAULT_RTC_INT_FREQ) { hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq; } else { struct clock_event_device *evt = &hpet_base.channels[0].evt; clc = (uint64_t) evt->mult * NSEC_PER_SEC; do_div(clc, freq); clc >>= evt->shift; hpet_pie_delta = clc; hpet_pie_limit = 0; } return 1; } EXPORT_SYMBOL_GPL(hpet_set_periodic_freq); static void hpet_rtc_timer_reinit(void) { unsigned int delta; int lost_ints = -1; if (unlikely(!hpet_rtc_flags)) hpet_disable_rtc_channel(); if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) delta = hpet_default_delta; else delta = hpet_pie_delta; /* * Increment the comparator value until we are ahead of the * current count. 
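* If the CPU was delayed (an SMI, a long interrupts-disabled section), * several periods may have passed; each extra increment needed here is * one lost RTC interrupt, counted and reported below.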
*/ do { hpet_t1_cmp += delta; hpet_writel(hpet_t1_cmp, HPET_T1_CMP); lost_ints++; } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER))); if (lost_ints) { if (hpet_rtc_flags & RTC_PIE) hpet_pie_count += lost_ints; if (printk_ratelimit()) pr_warn("Lost %d RTC interrupts\n", lost_ints); } } irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) { struct rtc_time curr_time; unsigned long rtc_int_flag = 0; hpet_rtc_timer_reinit(); memset(&curr_time, 0, sizeof(struct rtc_time)); if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) { if (unlikely(mc146818_get_time(&curr_time, 10) < 0)) { pr_err_ratelimited("unable to read current time from RTC\n"); return IRQ_HANDLED; } } if (hpet_rtc_flags & RTC_UIE && curr_time.tm_sec != hpet_prev_update_sec) { if (hpet_prev_update_sec >= 0) rtc_int_flag = RTC_UF; hpet_prev_update_sec = curr_time.tm_sec; } if (hpet_rtc_flags & RTC_PIE && ++hpet_pie_count >= hpet_pie_limit) { rtc_int_flag |= RTC_PF; hpet_pie_count = 0; } if (hpet_rtc_flags & RTC_AIE && (curr_time.tm_sec == hpet_alarm_time.tm_sec) && (curr_time.tm_min == hpet_alarm_time.tm_min) && (curr_time.tm_hour == hpet_alarm_time.tm_hour)) rtc_int_flag |= RTC_AF; if (rtc_int_flag) { rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8)); if (irq_handler) irq_handler(rtc_int_flag, dev_id); } return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(hpet_rtc_interrupt); #endif
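The hpet_cnt_ahead() helper above compares 32-bit counter values with a signed cast so that the comparison stays correct across counter wraparound. A minimal standalone sketch of the idiom (hypothetical demo_cnt_ahead(), userspace only, not part of the driver):

/* Wraparound-safe "is c1 ahead of c2" check, mirroring hpet_cnt_ahead().
 * Valid as long as the two values are less than 2^31 apart. */
#include <stdint.h>
#include <stdio.h>

static int demo_cnt_ahead(uint32_t c1, uint32_t c2)
{
	return (int32_t)(c2 - c1) < 0;
}

int main(void)
{
	printf("%d\n", demo_cnt_ahead(200, 100));               /* 1: plainly ahead */
	printf("%d\n", demo_cnt_ahead(0x00000005, 0xFFFFFFF0)); /* 1: ahead across the wrap */
	printf("%d\n", demo_cnt_ahead(100, 200));               /* 0: behind */
	return 0;
}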
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */ #include <linux/skbuff.h> #include <linux/skbuff_ref.h> #include <linux/workqueue.h> #include <net/strparser.h> #include <net/tcp.h> #include <net/sock.h> #include <net/tls.h> #include "tls.h" static struct workqueue_struct *tls_strp_wq; void tls_strp_abort_strp(struct tls_strparser *strp, int err) { if (strp->stopped) return; strp->stopped = 1; /* Report an error on the lower socket */ WRITE_ONCE(strp->sk->sk_err, -err); /* Paired with smp_rmb() in tcp_poll() */ smp_wmb(); sk_error_report(strp->sk); } static void tls_strp_anchor_free(struct tls_strparser *strp) { struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1); if (!strp->copy_mode) shinfo->frag_list = NULL; consume_skb(strp->anchor); strp->anchor = NULL; } static struct sk_buff * tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb, int offset, int len) { struct
sk_buff *skb; int i, err; skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER, &err, strp->sk->sk_allocation); if (!skb) return NULL; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; WARN_ON_ONCE(skb_copy_bits(in_skb, offset, skb_frag_address(frag), skb_frag_size(frag))); offset += skb_frag_size(frag); } skb->len = len; skb->data_len = len; skb_copy_header(skb, in_skb); return skb; } /* Create a new skb with the contents of input copied to its page frags */ static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp) { struct strp_msg *rxm; struct sk_buff *skb; skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset, strp->stm.full_len); if (!skb) return NULL; rxm = strp_msg(skb); rxm->offset = 0; return skb; } /* Steal the input skb, input msg is invalid after calling this function */ struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx) { struct tls_strparser *strp = &ctx->strp; #ifdef CONFIG_TLS_DEVICE DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted); #else /* This function turns an input into an output, * that can only happen if we have offload. */ WARN_ON(1); #endif if (strp->copy_mode) { struct sk_buff *skb; /* Replace anchor with an empty skb, this is a little * dangerous but __tls_cur_msg() warns on empty skbs * so hopefully we'll catch abuses. */ skb = alloc_skb(0, strp->sk->sk_allocation); if (!skb) return NULL; swap(strp->anchor, skb); return skb; } return tls_strp_msg_make_copy(strp); } /* Force the input skb to be in copy mode. The data ownership remains * with the input skb itself (meaning unpause will wipe it) but it can * be modified. */ int tls_strp_msg_cow(struct tls_sw_context_rx *ctx) { struct tls_strparser *strp = &ctx->strp; struct sk_buff *skb; if (strp->copy_mode) return 0; skb = tls_strp_msg_make_copy(strp); if (!skb) return -ENOMEM; tls_strp_anchor_free(strp); strp->anchor = skb; tcp_read_done(strp->sk, strp->stm.full_len); strp->copy_mode = 1; return 0; } /* Make a clone (in the skb sense) of the input msg to keep a reference * to the underlying data. The reference-holding skbs get placed on * @dst. 
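* In copy mode the anchor itself is queued on @dst (it cannot be cloned, * since unpause wipes it) and a fresh empty skb takes its place as the * anchor.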
*/ int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst) { struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); if (strp->copy_mode) { struct sk_buff *skb; WARN_ON_ONCE(!shinfo->nr_frags); /* We can't skb_clone() the anchor, it gets wiped by unpause */ skb = alloc_skb(0, strp->sk->sk_allocation); if (!skb) return -ENOMEM; __skb_queue_tail(dst, strp->anchor); strp->anchor = skb; } else { struct sk_buff *iter, *clone; int chunk, len, offset; offset = strp->stm.offset; len = strp->stm.full_len; iter = shinfo->frag_list; while (len > 0) { if (iter->len <= offset) { offset -= iter->len; goto next; } chunk = iter->len - offset; offset = 0; clone = skb_clone(iter, strp->sk->sk_allocation); if (!clone) return -ENOMEM; __skb_queue_tail(dst, clone); len -= chunk; next: iter = iter->next; } } return 0; } static void tls_strp_flush_anchor_copy(struct tls_strparser *strp) { struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); int i; DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1); for (i = 0; i < shinfo->nr_frags; i++) __skb_frag_unref(&shinfo->frags[i], false); shinfo->nr_frags = 0; if (strp->copy_mode) { kfree_skb_list(shinfo->frag_list); shinfo->frag_list = NULL; } strp->copy_mode = 0; strp->mixed_decrypted = 0; } static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb, struct sk_buff *in_skb, unsigned int offset, size_t in_len) { unsigned int nfrag = skb->len / PAGE_SIZE; size_t len, chunk; skb_frag_t *frag; int sz; if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) { DEBUG_NET_WARN_ON_ONCE(1); return -EMSGSIZE; } frag = &skb_shinfo(skb)->frags[nfrag]; len = in_len; /* First make sure we got the header */ if (!strp->stm.full_len) { /* Assume one page is more than enough for headers */ chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag)); WARN_ON_ONCE(skb_copy_bits(in_skb, offset, skb_frag_address(frag) + skb_frag_size(frag), chunk)); skb->len += chunk; skb->data_len += chunk; skb_frag_size_add(frag, chunk); sz = tls_rx_msg_size(strp, skb); if (sz < 0) return sz; /* We may have over-read, sz == 0 is guaranteed under-read */ if (unlikely(sz && sz < skb->len)) { int over = skb->len - sz; WARN_ON_ONCE(over > chunk); skb->len -= over; skb->data_len -= over; skb_frag_size_add(frag, -over); chunk -= over; } frag++; len -= chunk; offset += chunk; strp->stm.full_len = sz; if (!strp->stm.full_len) goto read_done; } /* Load up more data */ while (len && strp->stm.full_len > skb->len) { chunk = min_t(size_t, len, strp->stm.full_len - skb->len); chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag)); WARN_ON_ONCE(skb_copy_bits(in_skb, offset, skb_frag_address(frag) + skb_frag_size(frag), chunk)); skb->len += chunk; skb->data_len += chunk; skb_frag_size_add(frag, chunk); frag++; len -= chunk; offset += chunk; } read_done: return in_len - len; } static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb, struct sk_buff *in_skb, unsigned int offset, size_t in_len) { struct sk_buff *nskb, *first, *last; struct skb_shared_info *shinfo; size_t chunk; int sz; if (strp->stm.full_len) chunk = strp->stm.full_len - skb->len; else chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE; chunk = min(chunk, in_len); nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk); if (!nskb) return -ENOMEM; shinfo = skb_shinfo(skb); if (!shinfo->frag_list) { shinfo->frag_list = nskb; nskb->prev = nskb; } else { first = shinfo->frag_list; last = first->prev; last->next = nskb; first->prev = nskb; } skb->len += chunk; skb->data_len += chunk; if 
(!strp->stm.full_len) { sz = tls_rx_msg_size(strp, skb); if (sz < 0) return sz; /* We may have over-read, sz == 0 is guaranteed under-read */ if (unlikely(sz && sz < skb->len)) { int over = skb->len - sz; WARN_ON_ONCE(over > chunk); skb->len -= over; skb->data_len -= over; __pskb_trim(nskb, nskb->len - over); chunk -= over; } strp->stm.full_len = sz; } return chunk; } static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb, unsigned int offset, size_t in_len) { struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data; struct sk_buff *skb; int ret; if (strp->msg_ready) return 0; skb = strp->anchor; if (!skb->len) skb_copy_decrypted(skb, in_skb); else strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb); if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted) ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len); else ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len); if (ret < 0) { desc->error = ret; ret = 0; } if (strp->stm.full_len && strp->stm.full_len == skb->len) { desc->count = 0; WRITE_ONCE(strp->msg_ready, 1); tls_rx_msg_ready(strp); } return ret; } static int tls_strp_read_copyin(struct tls_strparser *strp) { read_descriptor_t desc; desc.arg.data = strp; desc.error = 0; desc.count = 1; /* give more than one skb per call */ /* sk should be locked here, so okay to do read_sock */ tcp_read_sock(strp->sk, &desc, tls_strp_copyin); return desc.error; } static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort) { struct skb_shared_info *shinfo; struct page *page; int need_spc, len; /* If the rbuf is small or the rcv window has collapsed to 0 we need * to read the data out. Otherwise the connection will stall. * Without the pressure check a threshold of INT_MAX would never * indicate readiness. */ if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX))) return 0; shinfo = skb_shinfo(strp->anchor); /* If we don't know the length, go with the max plus a page for cipher overhead */ need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE; for (len = need_spc; len > 0; len -= PAGE_SIZE) { page = alloc_page(strp->sk->sk_allocation); if (!page) { tls_strp_flush_anchor_copy(strp); return -ENOMEM; } skb_fill_page_desc(strp->anchor, shinfo->nr_frags++, page, 0, 0); } shinfo->frag_list = NULL; strp->copy_mode = 1; strp->stm.offset = 0; strp->anchor->len = 0; strp->anchor->data_len = 0; strp->anchor->truesize = round_up(need_spc, PAGE_SIZE); tls_strp_read_copyin(strp); return 0; } static bool tls_strp_check_queue_ok(struct tls_strparser *strp) { unsigned int len = strp->stm.offset + strp->stm.full_len; struct sk_buff *first, *skb; u32 seq; first = skb_shinfo(strp->anchor)->frag_list; skb = first; seq = TCP_SKB_CB(first)->seq; /* Make sure there's no duplicate data in the queue, * and the decrypted status matches.
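* A sequence-number gap or overlap, or a decrypted-status mismatch, makes * the caller fall back to the copy path.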
*/ while (skb->len < len) { seq += skb->len; len -= skb->len; skb = skb->next; if (TCP_SKB_CB(skb)->seq != seq) return false; if (skb_cmp_decrypted(first, skb)) return false; } return true; } static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len) { struct tcp_sock *tp = tcp_sk(strp->sk); struct sk_buff *first; u32 offset; first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset); if (WARN_ON_ONCE(!first)) return; /* Bestow the state onto the anchor */ strp->anchor->len = offset + len; strp->anchor->data_len = offset + len; strp->anchor->truesize = offset + len; skb_shinfo(strp->anchor)->frag_list = first; skb_copy_header(strp->anchor, first); strp->anchor->destructor = NULL; strp->stm.offset = offset; } bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) { struct strp_msg *rxm; struct tls_msg *tlm; DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready); DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len); if (!strp->copy_mode && force_refresh) { if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) { WRITE_ONCE(strp->msg_ready, 0); memset(&strp->stm, 0, sizeof(strp->stm)); return false; } tls_strp_load_anchor_with_queue(strp, strp->stm.full_len); } rxm = strp_msg(strp->anchor); rxm->full_len = strp->stm.full_len; rxm->offset = strp->stm.offset; tlm = tls_msg(strp->anchor); tlm->control = strp->mark; return true; } /* Called with lock held on lower socket */ static int tls_strp_read_sock(struct tls_strparser *strp) { int sz, inq; inq = tcp_inq(strp->sk); if (inq < 1) return 0; if (unlikely(strp->copy_mode)) return tls_strp_read_copyin(strp); if (inq < strp->stm.full_len) return tls_strp_read_copy(strp, true); tls_strp_load_anchor_with_queue(strp, inq); if (!strp->stm.full_len) { sz = tls_rx_msg_size(strp, strp->anchor); if (sz < 0) return sz; strp->stm.full_len = sz; if (!strp->stm.full_len || inq < strp->stm.full_len) return tls_strp_read_copy(strp, true); } if (!tls_strp_check_queue_ok(strp)) return tls_strp_read_copy(strp, false); WRITE_ONCE(strp->msg_ready, 1); tls_rx_msg_ready(strp); return 0; } void tls_strp_check_rcv(struct tls_strparser *strp) { if (unlikely(strp->stopped) || strp->msg_ready) return; if (tls_strp_read_sock(strp) == -ENOMEM) queue_work(tls_strp_wq, &strp->work); } /* Lower sock lock held */ void tls_strp_data_ready(struct tls_strparser *strp) { /* This check is needed to synchronize with tls_strp_work. * tls_strp_work acquires a process lock (lock_sock) whereas * the lock held here is bh_lock_sock. The two locks can be * held by different threads at the same time, but bh_lock_sock * allows a thread in BH context to safely check if the process * lock is held. In this case, if the lock is held, queue work.
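* sock_owned_by_user_nocheck() below performs exactly that check.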
*/ if (sock_owned_by_user_nocheck(strp->sk)) { queue_work(tls_strp_wq, &strp->work); return; } tls_strp_check_rcv(strp); } static void tls_strp_work(struct work_struct *w) { struct tls_strparser *strp = container_of(w, struct tls_strparser, work); lock_sock(strp->sk); tls_strp_check_rcv(strp); release_sock(strp->sk); } void tls_strp_msg_done(struct tls_strparser *strp) { WARN_ON(!strp->stm.full_len); if (likely(!strp->copy_mode)) tcp_read_done(strp->sk, strp->stm.full_len); else tls_strp_flush_anchor_copy(strp); WRITE_ONCE(strp->msg_ready, 0); memset(&strp->stm, 0, sizeof(strp->stm)); tls_strp_check_rcv(strp); } void tls_strp_stop(struct tls_strparser *strp) { strp->stopped = 1; } int tls_strp_init(struct tls_strparser *strp, struct sock *sk) { memset(strp, 0, sizeof(*strp)); strp->sk = sk; strp->anchor = alloc_skb(0, GFP_KERNEL); if (!strp->anchor) return -ENOMEM; INIT_WORK(&strp->work, tls_strp_work); return 0; } /* strp must already be stopped so that tls_strp_recv will no longer be called. * Note that tls_strp_done is not called with the lower socket held. */ void tls_strp_done(struct tls_strparser *strp) { WARN_ON(!strp->stopped); cancel_work_sync(&strp->work); tls_strp_anchor_free(strp); } int __init tls_strp_dev_init(void) { tls_strp_wq = create_workqueue("tls-strp"); if (unlikely(!tls_strp_wq)) return -ENOMEM; return 0; } void tls_strp_dev_exit(void) { destroy_workqueue(tls_strp_wq); }
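tls_strp_copyin_skb() above appends to the anchor's frag_list in O(1) by caching the tail pointer in the head skb's prev field (nskb->prev = nskb on first insert, first->prev = nskb afterwards). A minimal userspace sketch of the same idiom, with hypothetical struct node and list_append() names rather than the kernel's sk_buff machinery:

/* Singly linked list with O(1) append: the head's prev field caches the
 * tail, mirroring the frag_list handling in tls_strp_copyin_skb(). */
#include <stddef.h>

struct node {
	struct node *next;
	struct node *prev;	/* meaningful only on the head: cached tail */
};

static void list_append(struct node **head, struct node *n)
{
	n->next = NULL;
	if (!*head) {
		*head = n;
		n->prev = n;		/* single element: head is also the tail */
	} else {
		struct node *first = *head;
		struct node *last = first->prev;

		last->next = n;
		first->prev = n;	/* update the cached tail */
	}
}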
// SPDX-License-Identifier: GPL-2.0-or-later /* * s2255drv.c - a driver for the Sensoray 2255 USB video capture device * * Copyright (C) 2007-2014 by Sensoray Company Inc. * Dean Anderson * * Some video buffer code based on vivi driver: * * The Sensoray 2255 device supports 4 simultaneous channels. * The channels are not "crossbar" inputs, they are physically * attached to separate video decoders. * * Because of USB 2.0 bandwidth limitations, there is only a * certain amount of data which may be transferred at one time. * * Example maximum bandwidth utilization: * * -full size, color mode YUYV or YUV422P: 2 channels at once * -full or half size Grey scale: all 4 channels at once * -half size, color mode YUYV or YUV422P: all 4 channels at once * -full size, color mode YUYV or YUV422P 1/2 frame rate: all 4 channels * at once. */ #include <linux/module.h> #include <linux/firmware.h> #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/usb.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-vmalloc.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #define S2255_VERSION "1.25.1" #define FIRMWARE_FILE_NAME "f2255usb.bin" /* default JPEG quality */ #define S2255_DEF_JPEG_QUAL 50 /* vendor request in */ #define S2255_VR_IN 0 /* vendor request out */ #define S2255_VR_OUT 1 /* firmware query */ #define S2255_VR_FW 0x30 /* USB endpoint number for configuring the device */ #define S2255_CONFIG_EP 2 /* maximum time for DSP to start responding after last FW word loaded(ms) */ #define S2255_DSP_BOOTTIME 800 /* maximum time to wait for firmware to load (ms) */ #define S2255_LOAD_TIMEOUT (5000 + S2255_DSP_BOOTTIME) #define S2255_MIN_BUFS 2 #define S2255_SETMODE_TIMEOUT 500 #define S2255_VIDSTATUS_TIMEOUT 350 #define S2255_MARKER_FRAME cpu_to_le32(0x2255DA4AL) #define S2255_MARKER_RESPONSE cpu_to_le32(0x2255ACACL) #define S2255_RESPONSE_SETMODE cpu_to_le32(0x01) #define S2255_RESPONSE_FW cpu_to_le32(0x10) #define S2255_RESPONSE_STATUS cpu_to_le32(0x20) #define S2255_USB_XFER_SIZE (16 * 1024) #define MAX_CHANNELS 4 #define SYS_FRAMES 4 /* maximum size is PAL full size plus room for the marker header(s) */ #define SYS_FRAMES_MAXSIZE (720*288*2*2 + 4096) #define DEF_USB_BLOCK S2255_USB_XFER_SIZE #define LINE_SZ_4CIFS_NTSC 640 #define LINE_SZ_2CIFS_NTSC 640 #define LINE_SZ_1CIFS_NTSC 320 #define LINE_SZ_4CIFS_PAL 704 #define LINE_SZ_2CIFS_PAL 704 #define LINE_SZ_1CIFS_PAL 352 #define NUM_LINES_4CIFS_NTSC 240 #define NUM_LINES_2CIFS_NTSC 240 #define NUM_LINES_1CIFS_NTSC 240 #define NUM_LINES_4CIFS_PAL 288 #define NUM_LINES_2CIFS_PAL 288 #define NUM_LINES_1CIFS_PAL 288 #define LINE_SZ_DEF 640 #define NUM_LINES_DEF 240 /* predefined settings */ #define FORMAT_NTSC 1 #define FORMAT_PAL 2 #define SCALE_4CIFS 1 /* 640x480(NTSC) or 704x576(PAL) */ #define SCALE_2CIFS 2 /* 640x240(NTSC) or 704x288(PAL) */ #define SCALE_1CIFS 3 /* 320x240(NTSC) or 352x288(PAL) */ /* SCALE_4CIFSI is the 2 fields interpolated into one */ #define SCALE_4CIFSI 4 /* 640x480(NTSC) or 704x576(PAL) high quality */ #define COLOR_YUVPL 1 /* YUV planar */ #define COLOR_YUVPK 2 /* YUV packed */ #define COLOR_Y8 4 /* monochrome */ #define COLOR_JPG 5 /* JPEG */ #define MASK_COLOR 0x000000ff #define
MASK_JPG_QUALITY 0x0000ff00 #define MASK_INPUT_TYPE 0x000f0000 /* frame decimation. */ #define FDEC_1 1 /* capture every frame. default */ #define FDEC_2 2 /* capture every 2nd frame */ #define FDEC_3 3 /* capture every 3rd frame */ #define FDEC_5 5 /* capture every 5th frame */ /*------------------------------------------------------- * Default mode parameters. *-------------------------------------------------------*/ #define DEF_SCALE SCALE_4CIFS #define DEF_COLOR COLOR_YUVPL #define DEF_FDEC FDEC_1 #define DEF_BRIGHT 0 #define DEF_CONTRAST 0x5c #define DEF_SATURATION 0x80 #define DEF_HUE 0 /* usb config commands */ #define IN_DATA_TOKEN cpu_to_le32(0x2255c0de) #define CMD_2255 0xc2255000 #define CMD_SET_MODE cpu_to_le32((CMD_2255 | 0x10)) #define CMD_START cpu_to_le32((CMD_2255 | 0x20)) #define CMD_STOP cpu_to_le32((CMD_2255 | 0x30)) #define CMD_STATUS cpu_to_le32((CMD_2255 | 0x40)) struct s2255_mode { u32 format; /* input video format (NTSC, PAL) */ u32 scale; /* output video scale */ u32 color; /* output video color format */ u32 fdec; /* frame decimation */ u32 bright; /* brightness */ u32 contrast; /* contrast */ u32 saturation; /* saturation */ u32 hue; /* hue (NTSC only)*/ u32 single; /* capture 1 frame at a time (!=0), continuously (==0)*/ u32 usb_block; /* block size. should be 4096 of DEF_USB_BLOCK */ u32 restart; /* if DSP requires restart */ }; #define S2255_READ_IDLE 0 #define S2255_READ_FRAME 1 /* frame structure */ struct s2255_framei { unsigned long size; unsigned long ulState; /* ulState:S2255_READ_IDLE, S2255_READ_FRAME*/ void *lpvbits; /* image data */ unsigned long cur_size; /* current data copied to it */ }; /* image buffer structure */ struct s2255_bufferi { unsigned long dwFrames; /* number of frames in buffer */ struct s2255_framei frame[SYS_FRAMES]; /* array of FRAME structures */ }; #define DEF_MODEI_NTSC_CONT {FORMAT_NTSC, DEF_SCALE, DEF_COLOR, \ DEF_FDEC, DEF_BRIGHT, DEF_CONTRAST, DEF_SATURATION, \ DEF_HUE, 0, DEF_USB_BLOCK, 0} /* for firmware loading, fw_state */ #define S2255_FW_NOTLOADED 0 #define S2255_FW_LOADED_DSPWAIT 1 #define S2255_FW_SUCCESS 2 #define S2255_FW_FAILED 3 #define S2255_FW_DISCONNECTING 4 #define S2255_FW_MARKER cpu_to_le32(0x22552f2f) /* 2255 read states */ #define S2255_READ_IDLE 0 #define S2255_READ_FRAME 1 struct s2255_fw { int fw_loaded; int fw_size; struct urb *fw_urb; atomic_t fw_state; void *pfw_data; wait_queue_head_t wait_fw; const struct firmware *fw; }; struct s2255_pipeinfo { u32 max_transfer_size; u32 cur_transfer_size; u8 *transfer_buffer; u32 state; void *stream_urb; void *dev; /* back pointer to s2255_dev struct*/ u32 err_count; u32 idx; }; struct s2255_fmt; /*forward declaration */ struct s2255_dev; /* 2255 video channel */ struct s2255_vc { struct s2255_dev *dev; struct video_device vdev; struct v4l2_ctrl_handler hdl; struct v4l2_ctrl *jpegqual_ctrl; int resources; struct list_head buf_list; struct s2255_bufferi buffer; struct s2255_mode mode; v4l2_std_id std; /* jpeg compression */ unsigned jpegqual; /* capture parameters (for high quality mode full size) */ struct v4l2_captureparm cap_parm; int cur_frame; int last_frame; /* allocated image size */ unsigned long req_image_size; /* received packet size */ unsigned long pkt_size; int bad_payload; unsigned long frame_count; /* if JPEG image */ int jpg_size; /* if channel configured to default state */ int configured; wait_queue_head_t wait_setmode; int setmode_ready; /* video status items */ int vidstatus; wait_queue_head_t wait_vidstatus; int vidstatus_ready; unsigned 
int width; unsigned int height; enum v4l2_field field; const struct s2255_fmt *fmt; int idx; /* channel number on device, 0-3 */ struct vb2_queue vb_vidq; struct mutex vb_lock; /* streaming lock */ spinlock_t qlock; }; struct s2255_dev { struct s2255_vc vc[MAX_CHANNELS]; struct v4l2_device v4l2_dev; refcount_t num_channels; int frames; struct mutex lock; /* channels[].vdev.lock */ struct mutex cmdlock; /* protects cmdbuf */ struct usb_device *udev; struct usb_interface *interface; u8 read_endpoint; struct timer_list timer; struct s2255_fw *fw_data; struct s2255_pipeinfo pipe; u32 cc; /* current channel */ int frame_ready; int chn_ready; /* dsp firmware version (f2255usb.bin) */ int dsp_fw_ver; u16 pid; /* product id */ #define S2255_CMDBUF_SIZE 512 __le32 *cmdbuf; }; static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct s2255_dev, v4l2_dev); } struct s2255_fmt { u32 fourcc; int depth; }; /* buffer for one video frame */ struct s2255_buffer { /* common v4l buffer stuff -- must be first */ struct vb2_v4l2_buffer vb; struct list_head list; }; /* current cypress EEPROM firmware version */ #define S2255_CUR_USB_FWVER ((3 << 8) | 12) /* current DSP FW version */ #define S2255_CUR_DSP_FWVER 10104 /* Need DSP version 5+ for video status feature */ #define S2255_MIN_DSP_STATUS 5 #define S2255_MIN_DSP_COLORFILTER 8 #define S2255_NORMS (V4L2_STD_ALL) /* private V4L2 controls */ /* * The following chart displays how COLORFILTER should be set * ========================================================= * = fourcc = COLORFILTER = * = =============================== * = = 0 = 1 = * ========================================================= * = V4L2_PIX_FMT_GREY(Y8) = monochrome from = monochrome= * = = s-video or = composite = * = = B/W camera = input = * ========================================================= * = other = color, svideo = color, = * = = = composite = * ========================================================= * * Notes: * channels 0-3 on 2255 are composite * channels 0-1 on 2257 are composite, 2-3 are s-video * If COLORFILTER is 0 with a composite color camera connected, * the output will appear monochrome but hatching * will occur. * COLORFILTER is different from "color killer" and "color effects" * for reasons above. */ #define S2255_V4L2_YC_ON 1 #define S2255_V4L2_YC_OFF 0 #define V4L2_CID_S2255_COLORFILTER (V4L2_CID_USER_S2255_BASE + 0) /* frame prefix size (sent once every frame) */ #define PREFIX_SIZE 512 /* Channels on box are in reverse order */ static unsigned long G_chnmap[MAX_CHANNELS] = {3, 2, 1, 0}; static int debug; static int s2255_start_readpipe(struct s2255_dev *dev); static void s2255_stop_readpipe(struct s2255_dev *dev); static int s2255_start_acquire(struct s2255_vc *vc); static int s2255_stop_acquire(struct s2255_vc *vc); static void s2255_fillbuff(struct s2255_vc *vc, struct s2255_buffer *buf, int jpgsize); static int s2255_set_mode(struct s2255_vc *vc, struct s2255_mode *mode); static int s2255_board_shutdown(struct s2255_dev *dev); static void s2255_fwload_start(struct s2255_dev *dev); static void s2255_destroy(struct s2255_dev *dev); static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req, u16 index, u16 value, void *buf, s32 buf_len, int bOut); /* dev_err macro with driver name */ #define S2255_DRIVER_NAME "s2255" #define s2255_dev_err(dev, fmt, arg...) \ dev_err(dev, S2255_DRIVER_NAME " - " fmt, ##arg) #define dprintk(dev, level, fmt, arg...) 
\ v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg) static struct usb_driver s2255_driver; /* start video number */ static int video_nr = -1; /* /dev/videoN, -1 for autodetect */ /* Enable jpeg capture. */ static int jpeg_enable = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level(0-100) default 0"); module_param(video_nr, int, 0644); MODULE_PARM_DESC(video_nr, "start video minor(-1 default autodetect)"); module_param(jpeg_enable, int, 0644); MODULE_PARM_DESC(jpeg_enable, "Jpeg enable(1-on 0-off) default 1"); /* USB device table */ #define USB_SENSORAY_VID 0x1943 static const struct usb_device_id s2255_table[] = { {USB_DEVICE(USB_SENSORAY_VID, 0x2255)}, {USB_DEVICE(USB_SENSORAY_VID, 0x2257)}, /*same family as 2255*/ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, s2255_table); #define BUFFER_TIMEOUT msecs_to_jiffies(400) /* image formats. */ /* JPEG formats must be defined last to support jpeg_enable parameter */ static const struct s2255_fmt formats[] = { { .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16 }, { .fourcc = V4L2_PIX_FMT_UYVY, .depth = 16 }, { .fourcc = V4L2_PIX_FMT_YUV422P, .depth = 16 }, { .fourcc = V4L2_PIX_FMT_GREY, .depth = 8 }, { .fourcc = V4L2_PIX_FMT_JPEG, .depth = 24 }, { .fourcc = V4L2_PIX_FMT_MJPEG, .depth = 24 } }; static int norm_maxw(struct s2255_vc *vc) { return (vc->std & V4L2_STD_525_60) ? LINE_SZ_4CIFS_NTSC : LINE_SZ_4CIFS_PAL; } static int norm_maxh(struct s2255_vc *vc) { return (vc->std & V4L2_STD_525_60) ? (NUM_LINES_1CIFS_NTSC * 2) : (NUM_LINES_1CIFS_PAL * 2); } static int norm_minw(struct s2255_vc *vc) { return (vc->std & V4L2_STD_525_60) ? LINE_SZ_1CIFS_NTSC : LINE_SZ_1CIFS_PAL; } static int norm_minh(struct s2255_vc *vc) { return (vc->std & V4L2_STD_525_60) ? (NUM_LINES_1CIFS_NTSC) : (NUM_LINES_1CIFS_PAL); } /* * TODO: fixme: move YUV reordering to hardware * converts 2255 planar format to yuyv or uyvy */ static void planar422p_to_yuv_packed(const unsigned char *in, unsigned char *out, int width, int height, int fmt) { unsigned char *pY; unsigned char *pCb; unsigned char *pCr; unsigned long size = height * width; unsigned int i; pY = (unsigned char *)in; pCr = (unsigned char *)in + height * width; pCb = (unsigned char *)in + height * width + (height * width / 2); for (i = 0; i < size * 2; i += 4) { out[i] = (fmt == V4L2_PIX_FMT_YUYV) ? *pY++ : *pCr++; out[i + 1] = (fmt == V4L2_PIX_FMT_YUYV) ? *pCr++ : *pY++; out[i + 2] = (fmt == V4L2_PIX_FMT_YUYV) ? *pY++ : *pCb++; out[i + 3] = (fmt == V4L2_PIX_FMT_YUYV) ? *pCb++ : *pY++; } return; } static void s2255_reset_dsppower(struct s2255_dev *dev) { s2255_vendor_req(dev, 0x40, 0x0000, 0x0001, NULL, 0, 1); msleep(50); s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); msleep(600); s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1); return; } /* kickstarts the firmware loading. from probe */ static void s2255_timer(struct timer_list *t) { struct s2255_dev *dev = timer_container_of(dev, t, timer); struct s2255_fw *data = dev->fw_data; if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) { pr_err("s2255: can't submit urb\n"); atomic_set(&data->fw_state, S2255_FW_FAILED); /* wake up anything waiting for the firmware */ wake_up(&data->wait_fw); return; } } /* this loads the firmware asynchronously. Originally this was done synchronously in probe. But it is better to load it asynchronously here than block inside the probe function. Blocking inside probe affects boot time. 
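Each completed chunk resubmits the URB from s2255_fwchunk_complete() until fw_size bytes have been sent.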
FW loading is triggered by the timer in the probe function */ static void s2255_fwchunk_complete(struct urb *urb) { struct s2255_fw *data = urb->context; struct usb_device *udev = urb->dev; int len; if (urb->status) { dev_err(&udev->dev, "URB failed with status %d\n", urb->status); atomic_set(&data->fw_state, S2255_FW_FAILED); /* wake up anything waiting for the firmware */ wake_up(&data->wait_fw); return; } if (data->fw_urb == NULL) { s2255_dev_err(&udev->dev, "disconnected\n"); atomic_set(&data->fw_state, S2255_FW_FAILED); /* wake up anything waiting for the firmware */ wake_up(&data->wait_fw); return; } #define CHUNK_SIZE 512 /* all USB transfers must be done with continuous kernel memory. can't allocate more than 128k in current linux kernel, so upload the firmware in chunks */ if (data->fw_loaded < data->fw_size) { len = (data->fw_loaded + CHUNK_SIZE) > data->fw_size ? data->fw_size % CHUNK_SIZE : CHUNK_SIZE; if (len < CHUNK_SIZE) memset(data->pfw_data, 0, CHUNK_SIZE); memcpy(data->pfw_data, (char *) data->fw->data + data->fw_loaded, len); usb_fill_bulk_urb(data->fw_urb, udev, usb_sndbulkpipe(udev, 2), data->pfw_data, CHUNK_SIZE, s2255_fwchunk_complete, data); if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) { dev_err(&udev->dev, "failed submit URB\n"); atomic_set(&data->fw_state, S2255_FW_FAILED); /* wake up anything waiting for the firmware */ wake_up(&data->wait_fw); return; } data->fw_loaded += len; } else atomic_set(&data->fw_state, S2255_FW_LOADED_DSPWAIT); return; } static void s2255_got_frame(struct s2255_vc *vc, int jpgsize) { struct s2255_buffer *buf; struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev); unsigned long flags = 0; spin_lock_irqsave(&vc->qlock, flags); if (list_empty(&vc->buf_list)) { dprintk(dev, 1, "No active queue to serve\n"); spin_unlock_irqrestore(&vc->qlock, flags); return; } buf = list_entry(vc->buf_list.next, struct s2255_buffer, list); list_del(&buf->list); buf->vb.vb2_buf.timestamp = ktime_get_ns(); buf->vb.field = vc->field; buf->vb.sequence = vc->frame_count; spin_unlock_irqrestore(&vc->qlock, flags); s2255_fillbuff(vc, buf, jpgsize); /* tell v4l buffer was filled */ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); dprintk(dev, 2, "%s: [buf] [%p]\n", __func__, buf); } static const struct s2255_fmt *format_by_fourcc(int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(formats); i++) { if (-1 == formats[i].fourcc) continue; if (!jpeg_enable && ((formats[i].fourcc == V4L2_PIX_FMT_JPEG) || (formats[i].fourcc == V4L2_PIX_FMT_MJPEG))) continue; if (formats[i].fourcc == fourcc) return formats + i; } return NULL; } /* video buffer vmalloc implementation based partly on VIVI driver which is * Copyright (c) 2006 by * Mauro Carvalho Chehab <mchehab--a.t--infradead.org> * Ted Walther <ted--a.t--enumera.com> * John Sokol <sokol--a.t--videotechnology.com> * http://v4l.videotechnology.com/ * */ static void s2255_fillbuff(struct s2255_vc *vc, struct s2255_buffer *buf, int jpgsize) { int pos = 0; const char *tmpbuf; char *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0); unsigned long last_frame; struct s2255_dev *dev = vc->dev; if (!vbuf) return; last_frame = vc->last_frame; if (last_frame != -1) { tmpbuf = (const char *)vc->buffer.frame[last_frame].lpvbits; switch (vc->fmt->fourcc) { case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: planar422p_to_yuv_packed((const unsigned char *)tmpbuf, vbuf, vc->width, vc->height, vc->fmt->fourcc); break; case V4L2_PIX_FMT_GREY: memcpy(vbuf, tmpbuf, vc->width * vc->height); break; case V4L2_PIX_FMT_JPEG: case 
V4L2_PIX_FMT_MJPEG: vb2_set_plane_payload(&buf->vb.vb2_buf, 0, jpgsize); memcpy(vbuf, tmpbuf, jpgsize); break; case V4L2_PIX_FMT_YUV422P: memcpy(vbuf, tmpbuf, vc->width * vc->height * 2); break; default: pr_info("s2255: unknown format?\n"); } vc->last_frame = -1; } else { pr_err("s2255: =======no frame\n"); return; } dprintk(dev, 2, "s2255fill at : Buffer %p size= %d\n", vbuf, pos); } /* ------------------------------------------------------------------ Videobuf operations ------------------------------------------------------------------*/ static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct s2255_vc *vc = vb2_get_drv_priv(vq); if (*nbuffers < S2255_MIN_BUFS) *nbuffers = S2255_MIN_BUFS; *nplanes = 1; sizes[0] = vc->width * vc->height * (vc->fmt->depth >> 3); return 0; } static int buffer_prepare(struct vb2_buffer *vb) { struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb); int w = vc->width; int h = vc->height; unsigned long size; dprintk(vc->dev, 4, "%s\n", __func__); if (vc->fmt == NULL) return -EINVAL; if ((w < norm_minw(vc)) || (w > norm_maxw(vc)) || (h < norm_minh(vc)) || (h > norm_maxh(vc))) { dprintk(vc->dev, 4, "invalid buffer prepare\n"); return -EINVAL; } size = w * h * (vc->fmt->depth >> 3); if (vb2_plane_size(vb, 0) < size) { dprintk(vc->dev, 4, "invalid buffer prepare\n"); return -EINVAL; } vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); return 0; } static void buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct s2255_buffer *buf = container_of(vbuf, struct s2255_buffer, vb); struct s2255_vc *vc = vb2_get_drv_priv(vb->vb2_queue); unsigned long flags = 0; dprintk(vc->dev, 1, "%s\n", __func__); spin_lock_irqsave(&vc->qlock, flags); list_add_tail(&buf->list, &vc->buf_list); spin_unlock_irqrestore(&vc->qlock, flags); } static int start_streaming(struct vb2_queue *vq, unsigned int count); static void stop_streaming(struct vb2_queue *vq); static const struct vb2_ops s2255_video_qops = { .queue_setup = queue_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .start_streaming = start_streaming, .stop_streaming = stop_streaming, }; static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct s2255_vc *vc = video_drvdata(file); struct s2255_dev *dev = vc->dev; strscpy(cap->driver, "s2255", sizeof(cap->driver)); strscpy(cap->card, "s2255", sizeof(cap->card)); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { int index = f->index; if (index >= ARRAY_SIZE(formats)) return -EINVAL; if (!jpeg_enable && ((formats[index].fourcc == V4L2_PIX_FMT_JPEG) || (formats[index].fourcc == V4L2_PIX_FMT_MJPEG))) return -EINVAL; f->pixelformat = formats[index].fourcc; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct s2255_vc *vc = video_drvdata(file); int is_ntsc = vc->std & V4L2_STD_525_60; f->fmt.pix.width = vc->width; f->fmt.pix.height = vc->height; if (f->fmt.pix.height >= (is_ntsc ? 
NUM_LINES_1CIFS_NTSC : NUM_LINES_1CIFS_PAL) * 2) f->fmt.pix.field = V4L2_FIELD_INTERLACED; else f->fmt.pix.field = V4L2_FIELD_TOP; f->fmt.pix.pixelformat = vc->fmt->fourcc; f->fmt.pix.bytesperline = f->fmt.pix.width * (vc->fmt->depth >> 3); f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { const struct s2255_fmt *fmt; enum v4l2_field field; struct s2255_vc *vc = video_drvdata(file); int is_ntsc = vc->std & V4L2_STD_525_60; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (fmt == NULL) return -EINVAL; dprintk(vc->dev, 50, "%s NTSC: %d suggested width: %d, height: %d\n", __func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height); if (is_ntsc) { /* NTSC */ if (f->fmt.pix.height >= NUM_LINES_1CIFS_NTSC * 2) { f->fmt.pix.height = NUM_LINES_1CIFS_NTSC * 2; field = V4L2_FIELD_INTERLACED; } else { f->fmt.pix.height = NUM_LINES_1CIFS_NTSC; field = V4L2_FIELD_TOP; } if (f->fmt.pix.width >= LINE_SZ_4CIFS_NTSC) f->fmt.pix.width = LINE_SZ_4CIFS_NTSC; else f->fmt.pix.width = LINE_SZ_1CIFS_NTSC; } else { /* PAL */ if (f->fmt.pix.height >= NUM_LINES_1CIFS_PAL * 2) { f->fmt.pix.height = NUM_LINES_1CIFS_PAL * 2; field = V4L2_FIELD_INTERLACED; } else { f->fmt.pix.height = NUM_LINES_1CIFS_PAL; field = V4L2_FIELD_TOP; } if (f->fmt.pix.width >= LINE_SZ_4CIFS_PAL) f->fmt.pix.width = LINE_SZ_4CIFS_PAL; else f->fmt.pix.width = LINE_SZ_1CIFS_PAL; } f->fmt.pix.field = field; f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; dprintk(vc->dev, 50, "%s: set width %d height %d field %d\n", __func__, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct s2255_vc *vc = video_drvdata(file); const struct s2255_fmt *fmt; struct vb2_queue *q = &vc->vb_vidq; struct s2255_mode mode; int ret; ret = vidioc_try_fmt_vid_cap(file, vc, f); if (ret < 0) return ret; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (fmt == NULL) return -EINVAL; if (vb2_is_busy(q)) { dprintk(vc->dev, 1, "queue busy\n"); return -EBUSY; } mode = vc->mode; vc->fmt = fmt; vc->width = f->fmt.pix.width; vc->height = f->fmt.pix.height; vc->field = f->fmt.pix.field; if (vc->width > norm_minw(vc)) { if (vc->height > norm_minh(vc)) { if (vc->cap_parm.capturemode & V4L2_MODE_HIGHQUALITY) mode.scale = SCALE_4CIFSI; else mode.scale = SCALE_4CIFS; } else mode.scale = SCALE_2CIFS; } else { mode.scale = SCALE_1CIFS; } /* color mode */ switch (vc->fmt->fourcc) { case V4L2_PIX_FMT_GREY: mode.color &= ~MASK_COLOR; mode.color |= COLOR_Y8; break; case V4L2_PIX_FMT_JPEG: case V4L2_PIX_FMT_MJPEG: mode.color &= ~MASK_COLOR; mode.color |= COLOR_JPG; mode.color |= (vc->jpegqual << 8); break; case V4L2_PIX_FMT_YUV422P: mode.color &= ~MASK_COLOR; mode.color |= COLOR_YUVPL; break; case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: default: mode.color &= ~MASK_COLOR; mode.color |= COLOR_YUVPK; break; } if ((mode.color & MASK_COLOR) != (vc->mode.color & MASK_COLOR)) mode.restart = 1; else if (mode.scale != vc->mode.scale) mode.restart = 1; else if (mode.format != vc->mode.format) mode.restart = 1; vc->mode = mode; (void) s2255_set_mode(vc, &mode); return 0; } /* write to the configuration pipe, synchronously */ static int s2255_write_config(struct usb_device *udev, unsigned char 
*pbuf, int size) { int pipe; int done; long retval = -1; if (udev) { pipe = usb_sndbulkpipe(udev, S2255_CONFIG_EP); retval = usb_bulk_msg(udev, pipe, pbuf, size, &done, 500); } return retval; } static u32 get_transfer_size(struct s2255_mode *mode) { int linesPerFrame = LINE_SZ_DEF; int pixelsPerLine = NUM_LINES_DEF; u32 outImageSize; u32 usbInSize; unsigned int mask_mult; if (mode == NULL) return 0; if (mode->format == FORMAT_NTSC) { switch (mode->scale) { case SCALE_4CIFS: case SCALE_4CIFSI: linesPerFrame = NUM_LINES_4CIFS_NTSC * 2; pixelsPerLine = LINE_SZ_4CIFS_NTSC; break; case SCALE_2CIFS: linesPerFrame = NUM_LINES_2CIFS_NTSC; pixelsPerLine = LINE_SZ_2CIFS_NTSC; break; case SCALE_1CIFS: linesPerFrame = NUM_LINES_1CIFS_NTSC; pixelsPerLine = LINE_SZ_1CIFS_NTSC; break; default: break; } } else if (mode->format == FORMAT_PAL) { switch (mode->scale) { case SCALE_4CIFS: case SCALE_4CIFSI: linesPerFrame = NUM_LINES_4CIFS_PAL * 2; pixelsPerLine = LINE_SZ_4CIFS_PAL; break; case SCALE_2CIFS: linesPerFrame = NUM_LINES_2CIFS_PAL; pixelsPerLine = LINE_SZ_2CIFS_PAL; break; case SCALE_1CIFS: linesPerFrame = NUM_LINES_1CIFS_PAL; pixelsPerLine = LINE_SZ_1CIFS_PAL; break; default: break; } } outImageSize = linesPerFrame * pixelsPerLine; if ((mode->color & MASK_COLOR) != COLOR_Y8) { /* 2 bytes/pixel if not monochrome */ outImageSize *= 2; } /* total bytes to send including prefix and 4K padding; must be a multiple of USB_READ_SIZE */ usbInSize = outImageSize + PREFIX_SIZE; /* always send prefix */ mask_mult = 0xffffffffUL - DEF_USB_BLOCK + 1; /* if size not a multiple of USB_READ_SIZE */ if (usbInSize & ~mask_mult) usbInSize = (usbInSize & mask_mult) + (DEF_USB_BLOCK); return usbInSize; } static void s2255_print_cfg(struct s2255_dev *sdev, struct s2255_mode *mode) { struct device *dev = &sdev->udev->dev; dev_info(dev, "------------------------------------------------\n"); dev_info(dev, "format: %d\nscale %d\n", mode->format, mode->scale); dev_info(dev, "fdec: %d\ncolor %d\n", mode->fdec, mode->color); dev_info(dev, "bright: 0x%x\n", mode->bright); dev_info(dev, "------------------------------------------------\n"); } /* * set mode is the function which controls the DSP. * the restart parameter in struct s2255_mode should be set whenever * the image size could change via color format, video system or image * size. 
* When the restart parameter is set, we sleep for ONE frame to allow the * DSP time to get the new frame */ static int s2255_set_mode(struct s2255_vc *vc, struct s2255_mode *mode) { int res; unsigned long chn_rev; struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev); int i; __le32 *buffer = dev->cmdbuf; mutex_lock(&dev->cmdlock); chn_rev = G_chnmap[vc->idx]; dprintk(dev, 3, "%s channel: %d\n", __func__, vc->idx); /* if JPEG, set the quality */ if ((mode->color & MASK_COLOR) == COLOR_JPG) { mode->color &= ~MASK_COLOR; mode->color |= COLOR_JPG; mode->color &= ~MASK_JPG_QUALITY; mode->color |= (vc->jpegqual << 8); } /* save the mode */ vc->mode = *mode; vc->req_image_size = get_transfer_size(mode); dprintk(dev, 1, "%s: reqsize %ld\n", __func__, vc->req_image_size); /* set the mode */ buffer[0] = IN_DATA_TOKEN; buffer[1] = (__le32) cpu_to_le32(chn_rev); buffer[2] = CMD_SET_MODE; for (i = 0; i < sizeof(struct s2255_mode) / sizeof(u32); i++) buffer[3 + i] = cpu_to_le32(((u32 *)&vc->mode)[i]); vc->setmode_ready = 0; res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512); if (debug) s2255_print_cfg(dev, mode); /* wait at least 3 frames before continuing */ if (mode->restart) { wait_event_timeout(vc->wait_setmode, (vc->setmode_ready != 0), msecs_to_jiffies(S2255_SETMODE_TIMEOUT)); if (vc->setmode_ready != 1) { dprintk(dev, 0, "s2255: no set mode response\n"); res = -EFAULT; } } /* clear the restart flag */ vc->mode.restart = 0; dprintk(dev, 1, "%s chn %d, result: %d\n", __func__, vc->idx, res); mutex_unlock(&dev->cmdlock); return res; } static int s2255_cmd_status(struct s2255_vc *vc, u32 *pstatus) { int res; u32 chn_rev; struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev); __le32 *buffer = dev->cmdbuf; mutex_lock(&dev->cmdlock); chn_rev = G_chnmap[vc->idx]; dprintk(dev, 4, "%s chan %d\n", __func__, vc->idx); /* form the get vid status command */ buffer[0] = IN_DATA_TOKEN; buffer[1] = (__le32) cpu_to_le32(chn_rev); buffer[2] = CMD_STATUS; *pstatus = 0; vc->vidstatus_ready = 0; res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512); wait_event_timeout(vc->wait_vidstatus, (vc->vidstatus_ready != 0), msecs_to_jiffies(S2255_VIDSTATUS_TIMEOUT)); if (vc->vidstatus_ready != 1) { dprintk(dev, 0, "s2255: no vidstatus response\n"); res = -EFAULT; } *pstatus = vc->vidstatus; dprintk(dev, 4, "%s, vid status %d\n", __func__, *pstatus); mutex_unlock(&dev->cmdlock); return res; } static int start_streaming(struct vb2_queue *vq, unsigned int count) { struct s2255_vc *vc = vb2_get_drv_priv(vq); int j; vc->last_frame = -1; vc->bad_payload = 0; vc->cur_frame = 0; vc->frame_count = 0; for (j = 0; j < SYS_FRAMES; j++) { vc->buffer.frame[j].ulState = S2255_READ_IDLE; vc->buffer.frame[j].cur_size = 0; } return s2255_start_acquire(vc); } /* abort streaming and wait for last buffer */ static void stop_streaming(struct vb2_queue *vq) { struct s2255_vc *vc = vb2_get_drv_priv(vq); struct s2255_buffer *buf, *node; unsigned long flags; (void) s2255_stop_acquire(vc); spin_lock_irqsave(&vc->qlock, flags); list_for_each_entry_safe(buf, node, &vc->buf_list, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dprintk(vc->dev, 2, "[%p/%d] done\n", buf, buf->vb.vb2_buf.index); } spin_unlock_irqrestore(&vc->qlock, flags); } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id i) { struct s2255_vc *vc = video_drvdata(file); struct s2255_mode mode; struct vb2_queue *q = &vc->vb_vidq; /* * Changing the standard implies a format change, which is not allowed * 
while buffers for use with streaming have already been allocated. */ if (vb2_is_busy(q)) return -EBUSY; mode = vc->mode; if (i & V4L2_STD_525_60) { dprintk(vc->dev, 4, "%s 60 Hz\n", __func__); /* if changing format, reset frame decimation/intervals */ if (mode.format != FORMAT_NTSC) { mode.restart = 1; mode.format = FORMAT_NTSC; mode.fdec = FDEC_1; vc->width = LINE_SZ_4CIFS_NTSC; vc->height = NUM_LINES_4CIFS_NTSC * 2; } } else if (i & V4L2_STD_625_50) { dprintk(vc->dev, 4, "%s 50 Hz\n", __func__); if (mode.format != FORMAT_PAL) { mode.restart = 1; mode.format = FORMAT_PAL; mode.fdec = FDEC_1; vc->width = LINE_SZ_4CIFS_PAL; vc->height = NUM_LINES_4CIFS_PAL * 2; } } else return -EINVAL; vc->std = i; if (mode.restart) s2255_set_mode(vc, &mode); return 0; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *i) { struct s2255_vc *vc = video_drvdata(file); *i = vc->std; return 0; } /* Sensoray 2255 is a multiple channel capture device. It does not have a "crossbar" of inputs. We use one V4L device per channel. The user must be aware that certain combinations are not allowed. For instance, you cannot do full FPS on more than 2 channels(2 videodevs) at once in color(you can do full fps on 4 channels with greyscale. */ static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { struct s2255_vc *vc = video_drvdata(file); struct s2255_dev *dev = vc->dev; u32 status = 0; if (inp->index != 0) return -EINVAL; inp->type = V4L2_INPUT_TYPE_CAMERA; inp->std = S2255_NORMS; inp->status = 0; if (dev->dsp_fw_ver >= S2255_MIN_DSP_STATUS) { int rc; rc = s2255_cmd_status(vc, &status); dprintk(dev, 4, "s2255_cmd_status rc: %d status %x\n", rc, status); if (rc == 0) inp->status = (status & 0x01) ? 0 : V4L2_IN_ST_NO_SIGNAL; } switch (dev->pid) { case 0x2255: default: strscpy(inp->name, "Composite", sizeof(inp->name)); break; case 0x2257: strscpy(inp->name, (vc->idx < 2) ? "Composite" : "S-Video", sizeof(inp->name)); break; } return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { if (i > 0) return -EINVAL; return 0; } static int s2255_s_ctrl(struct v4l2_ctrl *ctrl) { struct s2255_vc *vc = container_of(ctrl->handler, struct s2255_vc, hdl); struct s2255_mode mode; mode = vc->mode; /* update the mode to the corresponding value */ switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: mode.bright = ctrl->val; break; case V4L2_CID_CONTRAST: mode.contrast = ctrl->val; break; case V4L2_CID_HUE: mode.hue = ctrl->val; break; case V4L2_CID_SATURATION: mode.saturation = ctrl->val; break; case V4L2_CID_S2255_COLORFILTER: mode.color &= ~MASK_INPUT_TYPE; mode.color |= !ctrl->val << 16; break; case V4L2_CID_JPEG_COMPRESSION_QUALITY: vc->jpegqual = ctrl->val; return 0; default: return -EINVAL; } mode.restart = 0; /* set mode here. Note: stream does not need restarted. some V4L programs restart stream unnecessarily after a s_crtl. 
*/ s2255_set_mode(vc, &mode); return 0; } static int vidioc_g_jpegcomp(struct file *file, void *priv, struct v4l2_jpegcompression *jc) { struct s2255_vc *vc = video_drvdata(file); memset(jc, 0, sizeof(*jc)); jc->quality = vc->jpegqual; dprintk(vc->dev, 2, "%s: quality %d\n", __func__, jc->quality); return 0; } static int vidioc_s_jpegcomp(struct file *file, void *priv, const struct v4l2_jpegcompression *jc) { struct s2255_vc *vc = video_drvdata(file); if (jc->quality < 0 || jc->quality > 100) return -EINVAL; v4l2_ctrl_s_ctrl(vc->jpegqual_ctrl, jc->quality); dprintk(vc->dev, 2, "%s: quality %d\n", __func__, jc->quality); return 0; } static int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *sp) { __u32 def_num, def_dem; struct s2255_vc *vc = video_drvdata(file); if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; sp->parm.capture.capturemode = vc->cap_parm.capturemode; sp->parm.capture.readbuffers = S2255_MIN_BUFS; def_num = (vc->mode.format == FORMAT_NTSC) ? 1001 : 1000; def_dem = (vc->mode.format == FORMAT_NTSC) ? 30000 : 25000; sp->parm.capture.timeperframe.denominator = def_dem; switch (vc->mode.fdec) { default: case FDEC_1: sp->parm.capture.timeperframe.numerator = def_num; break; case FDEC_2: sp->parm.capture.timeperframe.numerator = def_num * 2; break; case FDEC_3: sp->parm.capture.timeperframe.numerator = def_num * 3; break; case FDEC_5: sp->parm.capture.timeperframe.numerator = def_num * 5; break; } dprintk(vc->dev, 4, "%s capture mode, %d timeperframe %d/%d\n", __func__, sp->parm.capture.capturemode, sp->parm.capture.timeperframe.numerator, sp->parm.capture.timeperframe.denominator); return 0; } static int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *sp) { struct s2255_vc *vc = video_drvdata(file); struct s2255_mode mode; int fdec = FDEC_1; __u32 def_num, def_dem; if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; mode = vc->mode; /* high quality capture mode requires a stream restart */ if ((vc->cap_parm.capturemode != sp->parm.capture.capturemode) && vb2_is_streaming(&vc->vb_vidq)) return -EBUSY; def_num = (mode.format == FORMAT_NTSC) ? 1001 : 1000; def_dem = (mode.format == FORMAT_NTSC) ? 
30000 : 25000; if (def_dem != sp->parm.capture.timeperframe.denominator) sp->parm.capture.timeperframe.numerator = def_num; else if (sp->parm.capture.timeperframe.numerator <= def_num) sp->parm.capture.timeperframe.numerator = def_num; else if (sp->parm.capture.timeperframe.numerator <= (def_num * 2)) { sp->parm.capture.timeperframe.numerator = def_num * 2; fdec = FDEC_2; } else if (sp->parm.capture.timeperframe.numerator <= (def_num * 3)) { sp->parm.capture.timeperframe.numerator = def_num * 3; fdec = FDEC_3; } else { sp->parm.capture.timeperframe.numerator = def_num * 5; fdec = FDEC_5; } mode.fdec = fdec; sp->parm.capture.timeperframe.denominator = def_dem; sp->parm.capture.readbuffers = S2255_MIN_BUFS; s2255_set_mode(vc, &mode); dprintk(vc->dev, 4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n", __func__, sp->parm.capture.capturemode, sp->parm.capture.timeperframe.numerator, sp->parm.capture.timeperframe.denominator, fdec); return 0; } #define NUM_SIZE_ENUMS 3 static const struct v4l2_frmsize_discrete ntsc_sizes[] = { { 640, 480 }, { 640, 240 }, { 320, 240 }, }; static const struct v4l2_frmsize_discrete pal_sizes[] = { { 704, 576 }, { 704, 288 }, { 352, 288 }, }; static int vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fe) { struct s2255_vc *vc = video_drvdata(file); int is_ntsc = vc->std & V4L2_STD_525_60; const struct s2255_fmt *fmt; if (fe->index >= NUM_SIZE_ENUMS) return -EINVAL; fmt = format_by_fourcc(fe->pixel_format); if (fmt == NULL) return -EINVAL; fe->type = V4L2_FRMSIZE_TYPE_DISCRETE; fe->discrete = is_ntsc ? ntsc_sizes[fe->index] : pal_sizes[fe->index]; return 0; } static int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fe) { struct s2255_vc *vc = video_drvdata(file); const struct s2255_fmt *fmt; const struct v4l2_frmsize_discrete *sizes; int is_ntsc = vc->std & V4L2_STD_525_60; #define NUM_FRAME_ENUMS 4 int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5}; int i; if (fe->index >= NUM_FRAME_ENUMS) return -EINVAL; fmt = format_by_fourcc(fe->pixel_format); if (fmt == NULL) return -EINVAL; sizes = is_ntsc ? ntsc_sizes : pal_sizes; for (i = 0; i < NUM_SIZE_ENUMS; i++, sizes++) if (fe->width == sizes->width && fe->height == sizes->height) break; if (i == NUM_SIZE_ENUMS) return -EINVAL; fe->type = V4L2_FRMIVAL_TYPE_DISCRETE; fe->discrete.denominator = is_ntsc ? 30000 : 25000; fe->discrete.numerator = (is_ntsc ? 1001 : 1000) * frm_dec[fe->index]; dprintk(vc->dev, 4, "%s discrete %d/%d\n", __func__, fe->discrete.numerator, fe->discrete.denominator); return 0; } static int s2255_open(struct file *file) { struct s2255_vc *vc = video_drvdata(file); struct s2255_dev *dev = vc->dev; int state; int rc = 0; rc = v4l2_fh_open(file); if (rc != 0) return rc; dprintk(dev, 1, "s2255: %s\n", __func__); state = atomic_read(&dev->fw_data->fw_state); switch (state) { case S2255_FW_DISCONNECTING: return -ENODEV; case S2255_FW_FAILED: s2255_dev_err(&dev->udev->dev, "firmware load failed. 
retrying.\n"); s2255_fwload_start(dev); wait_event_timeout(dev->fw_data->wait_fw, ((atomic_read(&dev->fw_data->fw_state) == S2255_FW_SUCCESS) || (atomic_read(&dev->fw_data->fw_state) == S2255_FW_DISCONNECTING)), msecs_to_jiffies(S2255_LOAD_TIMEOUT)); /* state may have changed, re-read */ state = atomic_read(&dev->fw_data->fw_state); break; case S2255_FW_NOTLOADED: case S2255_FW_LOADED_DSPWAIT: /* give S2255_LOAD_TIMEOUT time for firmware to load in case driver loaded and then device immediately opened */ pr_info("%s waiting for firmware load\n", __func__); wait_event_timeout(dev->fw_data->wait_fw, ((atomic_read(&dev->fw_data->fw_state) == S2255_FW_SUCCESS) || (atomic_read(&dev->fw_data->fw_state) == S2255_FW_DISCONNECTING)), msecs_to_jiffies(S2255_LOAD_TIMEOUT)); /* state may have changed, re-read */ state = atomic_read(&dev->fw_data->fw_state); break; case S2255_FW_SUCCESS: default: break; } /* state may have changed in above switch statement */ switch (state) { case S2255_FW_SUCCESS: break; case S2255_FW_FAILED: pr_info("2255 firmware load failed.\n"); return -ENODEV; case S2255_FW_DISCONNECTING: pr_info("%s: disconnecting\n", __func__); return -ENODEV; case S2255_FW_LOADED_DSPWAIT: case S2255_FW_NOTLOADED: pr_info("%s: firmware not loaded, please retry\n", __func__); /* * Timeout on firmware load means device unusable. * Set firmware failure state. * On next s2255_open the firmware will be reloaded. */ atomic_set(&dev->fw_data->fw_state, S2255_FW_FAILED); return -EAGAIN; default: pr_info("%s: unknown state\n", __func__); return -EFAULT; } if (!vc->configured) { /* configure channel to default state */ vc->fmt = &formats[0]; s2255_set_mode(vc, &vc->mode); vc->configured = 1; } return 0; } static void s2255_destroy(struct s2255_dev *dev) { dprintk(dev, 1, "%s", __func__); /* board shutdown stops the read pipe if it is running */ s2255_board_shutdown(dev); /* make sure firmware still not trying to load */ timer_shutdown_sync(&dev->timer); /* only started in .probe and .open */ if (dev->fw_data->fw_urb) { usb_kill_urb(dev->fw_data->fw_urb); usb_free_urb(dev->fw_data->fw_urb); dev->fw_data->fw_urb = NULL; } release_firmware(dev->fw_data->fw); kfree(dev->fw_data->pfw_data); kfree(dev->fw_data); /* reset the DSP so firmware can be reloaded next time */ s2255_reset_dsppower(dev); mutex_destroy(&dev->lock); usb_put_dev(dev->udev); v4l2_device_unregister(&dev->v4l2_dev); kfree(dev->cmdbuf); kfree(dev); } static const struct v4l2_file_operations s2255_fops_v4l = { .owner = THIS_MODULE, .open = s2255_open, .release = vb2_fop_release, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */ .mmap = vb2_fop_mmap, .read = vb2_fop_read, }; static const struct v4l2_ioctl_ops s2255_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_s_std = vidioc_s_std, .vidioc_g_std = vidioc_g_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_s_jpegcomp = vidioc_s_jpegcomp, .vidioc_g_jpegcomp = vidioc_g_jpegcomp, .vidioc_s_parm = vidioc_s_parm, .vidioc_g_parm = vidioc_g_parm, .vidioc_enum_framesizes = 
vidioc_enum_framesizes, .vidioc_enum_frameintervals = vidioc_enum_frameintervals, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static void s2255_video_device_release(struct video_device *vdev) { struct s2255_dev *dev = to_s2255_dev(vdev->v4l2_dev); struct s2255_vc *vc = container_of(vdev, struct s2255_vc, vdev); dprintk(dev, 4, "%s, chnls: %d\n", __func__, refcount_read(&dev->num_channels)); v4l2_ctrl_handler_free(&vc->hdl); if (refcount_dec_and_test(&dev->num_channels)) s2255_destroy(dev); return; } static const struct video_device template = { .name = "s2255v", .fops = &s2255_fops_v4l, .ioctl_ops = &s2255_ioctl_ops, .release = s2255_video_device_release, .tvnorms = S2255_NORMS, }; static const struct v4l2_ctrl_ops s2255_ctrl_ops = { .s_ctrl = s2255_s_ctrl, }; static const struct v4l2_ctrl_config color_filter_ctrl = { .ops = &s2255_ctrl_ops, .name = "Color Filter", .id = V4L2_CID_S2255_COLORFILTER, .type = V4L2_CTRL_TYPE_BOOLEAN, .max = 1, .step = 1, .def = 1, }; static int s2255_probe_v4l(struct s2255_dev *dev) { int ret; int i; int cur_nr = video_nr; struct s2255_vc *vc; struct vb2_queue *q; ret = v4l2_device_register(&dev->interface->dev, &dev->v4l2_dev); if (ret) return ret; /* initialize all video 4 linux */ /* register 4 video devices */ for (i = 0; i < MAX_CHANNELS; i++) { vc = &dev->vc[i]; INIT_LIST_HEAD(&vc->buf_list); v4l2_ctrl_handler_init(&vc->hdl, 6); v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops, V4L2_CID_BRIGHTNESS, -127, 127, 1, DEF_BRIGHT); v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, DEF_CONTRAST); v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, DEF_SATURATION); v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops, V4L2_CID_HUE, 0, 255, 1, DEF_HUE); vc->jpegqual_ctrl = v4l2_ctrl_new_std(&vc->hdl, &s2255_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 0, 100, 1, S2255_DEF_JPEG_QUAL); if (dev->dsp_fw_ver >= S2255_MIN_DSP_COLORFILTER && (dev->pid != 0x2257 || vc->idx <= 1)) v4l2_ctrl_new_custom(&vc->hdl, &color_filter_ctrl, NULL); if (vc->hdl.error) { ret = vc->hdl.error; v4l2_ctrl_handler_free(&vc->hdl); dev_err(&dev->udev->dev, "couldn't register control\n"); break; } q = &vc->vb_vidq; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_READ | VB2_USERPTR; q->drv_priv = vc; q->lock = &vc->vb_lock; q->buf_struct_size = sizeof(struct s2255_buffer); q->mem_ops = &vb2_vmalloc_memops; q->ops = &s2255_video_qops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; ret = vb2_queue_init(q); if (ret != 0) { dev_err(&dev->udev->dev, "%s vb2_queue_init 0x%x\n", __func__, ret); break; } /* register video devices */ vc->vdev = template; vc->vdev.queue = q; vc->vdev.ctrl_handler = &vc->hdl; vc->vdev.lock = &dev->lock; vc->vdev.v4l2_dev = &dev->v4l2_dev; vc->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; video_set_drvdata(&vc->vdev, vc); if (video_nr == -1) ret = video_register_device(&vc->vdev, VFL_TYPE_VIDEO, video_nr); else ret = video_register_device(&vc->vdev, VFL_TYPE_VIDEO, cur_nr + i); if (ret) { dev_err(&dev->udev->dev, "failed to register video device!\n"); break; } refcount_inc(&dev->num_channels); v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n", video_device_node_name(&vc->vdev)); } pr_info("Sensoray 2255 V4L driver Revision: %s\n", S2255_VERSION); /* if no channels registered, return error and probe will fail*/ if (refcount_read(&dev->num_channels) == 0) { 
v4l2_device_unregister(&dev->v4l2_dev); return ret; } if (refcount_read(&dev->num_channels) != MAX_CHANNELS) pr_warn("s2255: Not all channels available.\n"); return 0; } /* this function moves the usb stream read pipe data * into the system buffers. * returns 0 on success, EAGAIN if more data to process( call this * function again). * * Received frame structure: * bytes 0-3: marker : 0x2255DA4AL (S2255_MARKER_FRAME) * bytes 4-7: channel: 0-3 * bytes 8-11: payload size: size of the frame * bytes 12-payloadsize+12: frame data */ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info) { char *pdest; u32 offset = 0; int bframe = 0; char *psrc; unsigned long copy_size; unsigned long size; s32 idx = -1; struct s2255_framei *frm; unsigned char *pdata; struct s2255_vc *vc; dprintk(dev, 100, "buffer to user\n"); vc = &dev->vc[dev->cc]; idx = vc->cur_frame; frm = &vc->buffer.frame[idx]; if (frm->ulState == S2255_READ_IDLE) { int jj; unsigned int cc; __le32 *pdword; /*data from dsp is little endian */ int payload; /* search for marker codes */ pdata = (unsigned char *)pipe_info->transfer_buffer; pdword = (__le32 *)pdata; for (jj = 0; jj < (pipe_info->cur_transfer_size - 12); jj++) { switch (*pdword) { case S2255_MARKER_FRAME: dprintk(dev, 4, "marker @ offset: %d [%x %x]\n", jj, pdata[0], pdata[1]); offset = jj + PREFIX_SIZE; bframe = 1; cc = le32_to_cpu(pdword[1]); if (cc >= MAX_CHANNELS) { dprintk(dev, 0, "bad channel\n"); return -EINVAL; } /* reverse it */ dev->cc = G_chnmap[cc]; vc = &dev->vc[dev->cc]; payload = le32_to_cpu(pdword[3]); if (payload > vc->req_image_size) { vc->bad_payload++; /* discard the bad frame */ return -EINVAL; } vc->pkt_size = payload; vc->jpg_size = le32_to_cpu(pdword[4]); break; case S2255_MARKER_RESPONSE: pdata += DEF_USB_BLOCK; jj += DEF_USB_BLOCK; if (le32_to_cpu(pdword[1]) >= MAX_CHANNELS) break; cc = G_chnmap[le32_to_cpu(pdword[1])]; if (cc >= MAX_CHANNELS) break; vc = &dev->vc[cc]; switch (pdword[2]) { case S2255_RESPONSE_SETMODE: /* check if channel valid */ /* set mode ready */ vc->setmode_ready = 1; wake_up(&vc->wait_setmode); dprintk(dev, 5, "setmode rdy %d\n", cc); break; case S2255_RESPONSE_FW: dev->chn_ready |= (1 << cc); if ((dev->chn_ready & 0x0f) != 0x0f) break; /* all channels ready */ pr_info("s2255: fw loaded\n"); atomic_set(&dev->fw_data->fw_state, S2255_FW_SUCCESS); wake_up(&dev->fw_data->wait_fw); break; case S2255_RESPONSE_STATUS: vc->vidstatus = le32_to_cpu(pdword[3]); vc->vidstatus_ready = 1; wake_up(&vc->wait_vidstatus); dprintk(dev, 5, "vstat %x chan %d\n", le32_to_cpu(pdword[3]), cc); break; default: pr_info("s2255 unknown resp\n"); } pdata++; break; default: pdata++; break; } if (bframe) break; } /* for */ if (!bframe) return -EINVAL; } vc = &dev->vc[dev->cc]; idx = vc->cur_frame; frm = &vc->buffer.frame[idx]; /* search done. 
now find out if should be acquiring on this channel */ if (!vb2_is_streaming(&vc->vb_vidq)) { /* we found a frame, but this channel is turned off */ frm->ulState = S2255_READ_IDLE; return -EINVAL; } if (frm->ulState == S2255_READ_IDLE) { frm->ulState = S2255_READ_FRAME; frm->cur_size = 0; } /* skip the marker 512 bytes (and offset if out of sync) */ psrc = (u8 *)pipe_info->transfer_buffer + offset; if (frm->lpvbits == NULL) { dprintk(dev, 1, "s2255 frame buffer == NULL.%p %p %d %d", frm, dev, dev->cc, idx); return -ENOMEM; } pdest = frm->lpvbits + frm->cur_size; copy_size = (pipe_info->cur_transfer_size - offset); size = vc->pkt_size - PREFIX_SIZE; /* sanity check on pdest */ if ((copy_size + frm->cur_size) < vc->req_image_size) memcpy(pdest, psrc, copy_size); frm->cur_size += copy_size; dprintk(dev, 4, "cur_size: %lu, size: %lu\n", frm->cur_size, size); if (frm->cur_size >= size) { dprintk(dev, 2, "******[%d]Buffer[%d]full*******\n", dev->cc, idx); vc->last_frame = vc->cur_frame; vc->cur_frame++; /* end of system frame ring buffer, start at zero */ if ((vc->cur_frame == SYS_FRAMES) || (vc->cur_frame == vc->buffer.dwFrames)) vc->cur_frame = 0; /* frame ready */ if (vb2_is_streaming(&vc->vb_vidq)) s2255_got_frame(vc, vc->jpg_size); vc->frame_count++; frm->ulState = S2255_READ_IDLE; frm->cur_size = 0; } /* done successfully */ return 0; } static void s2255_read_video_callback(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info) { int res; dprintk(dev, 50, "callback read video\n"); if (dev->cc >= MAX_CHANNELS) { dev->cc = 0; dev_err(&dev->udev->dev, "invalid channel\n"); return; } /* otherwise copy to the system buffers */ res = save_frame(dev, pipe_info); if (res != 0) dprintk(dev, 4, "s2255: read callback failed\n"); dprintk(dev, 50, "callback read video done\n"); return; } static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request, u16 Index, u16 Value, void *TransferBuffer, s32 TransferBufferLength, int bOut) { int r; unsigned char *buf; buf = kmalloc(TransferBufferLength, GFP_KERNEL); if (!buf) return -ENOMEM; if (!bOut) { r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, Value, Index, buf, TransferBufferLength, USB_CTRL_SET_TIMEOUT); if (r >= 0) memcpy(TransferBuffer, buf, TransferBufferLength); } else { memcpy(buf, TransferBuffer, TransferBufferLength); r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, Value, Index, buf, TransferBufferLength, USB_CTRL_SET_TIMEOUT); } kfree(buf); return r; } /* * retrieve FX2 firmware version. future use. * @param dev pointer to device extension * @return -1 for fail, else returns firmware version as an int(16 bits) */ static int s2255_get_fx2fw(struct s2255_dev *dev) { int fw; int ret; u8 transBuffer[2] = {}; ret = s2255_vendor_req(dev, S2255_VR_FW, 0, 0, transBuffer, sizeof(transBuffer), S2255_VR_IN); if (ret < 0) dprintk(dev, 2, "get fw error: %x\n", ret); fw = transBuffer[0] + (transBuffer[1] << 8); dprintk(dev, 2, "Get FW %x %x\n", transBuffer[0], transBuffer[1]); return fw; } /* * Create the system ring buffer to copy frames into from the * usb read pipe. 
*/ static int s2255_create_sys_buffers(struct s2255_vc *vc) { unsigned long i; unsigned long reqsize; vc->buffer.dwFrames = SYS_FRAMES; /* always allocate maximum size(PAL) for system buffers */ reqsize = SYS_FRAMES_MAXSIZE; if (reqsize > SYS_FRAMES_MAXSIZE) reqsize = SYS_FRAMES_MAXSIZE; for (i = 0; i < SYS_FRAMES; i++) { /* allocate the frames */ vc->buffer.frame[i].lpvbits = vmalloc(reqsize); vc->buffer.frame[i].size = reqsize; if (vc->buffer.frame[i].lpvbits == NULL) { pr_info("out of memory. using less frames\n"); vc->buffer.dwFrames = i; break; } } /* make sure internal states are set */ for (i = 0; i < SYS_FRAMES; i++) { vc->buffer.frame[i].ulState = 0; vc->buffer.frame[i].cur_size = 0; } vc->cur_frame = 0; vc->last_frame = -1; return 0; } static int s2255_release_sys_buffers(struct s2255_vc *vc) { unsigned long i; for (i = 0; i < SYS_FRAMES; i++) { vfree(vc->buffer.frame[i].lpvbits); vc->buffer.frame[i].lpvbits = NULL; } return 0; } static int s2255_board_init(struct s2255_dev *dev) { struct s2255_mode mode_def = DEF_MODEI_NTSC_CONT; int fw_ver; int j; struct s2255_pipeinfo *pipe = &dev->pipe; dprintk(dev, 4, "board init: %p", dev); memset(pipe, 0, sizeof(*pipe)); pipe->dev = dev; pipe->cur_transfer_size = S2255_USB_XFER_SIZE; pipe->max_transfer_size = S2255_USB_XFER_SIZE; pipe->transfer_buffer = kzalloc(pipe->max_transfer_size, GFP_KERNEL); if (pipe->transfer_buffer == NULL) { dprintk(dev, 1, "out of memory!\n"); return -ENOMEM; } /* query the firmware */ fw_ver = s2255_get_fx2fw(dev); pr_info("s2255: usb firmware version %d.%d\n", (fw_ver >> 8) & 0xff, fw_ver & 0xff); if (fw_ver < S2255_CUR_USB_FWVER) pr_info("s2255: newer USB firmware available\n"); for (j = 0; j < MAX_CHANNELS; j++) { struct s2255_vc *vc = &dev->vc[j]; vc->mode = mode_def; if (dev->pid == 0x2257 && j > 1) vc->mode.color |= (1 << 16); vc->jpegqual = S2255_DEF_JPEG_QUAL; vc->width = LINE_SZ_4CIFS_NTSC; vc->height = NUM_LINES_4CIFS_NTSC * 2; vc->std = V4L2_STD_NTSC_M; vc->fmt = &formats[0]; vc->mode.restart = 1; vc->req_image_size = get_transfer_size(&mode_def); vc->frame_count = 0; /* create the system buffers */ s2255_create_sys_buffers(vc); } /* start read pipe */ s2255_start_readpipe(dev); dprintk(dev, 1, "%s: success\n", __func__); return 0; } static int s2255_board_shutdown(struct s2255_dev *dev) { u32 i; dprintk(dev, 1, "%s: dev: %p", __func__, dev); for (i = 0; i < MAX_CHANNELS; i++) { if (vb2_is_streaming(&dev->vc[i].vb_vidq)) s2255_stop_acquire(&dev->vc[i]); } s2255_stop_readpipe(dev); for (i = 0; i < MAX_CHANNELS; i++) s2255_release_sys_buffers(&dev->vc[i]); /* release transfer buffer */ kfree(dev->pipe.transfer_buffer); return 0; } static void read_pipe_completion(struct urb *purb) { struct s2255_pipeinfo *pipe_info; struct s2255_dev *dev; int status; int pipe; pipe_info = purb->context; if (pipe_info == NULL) { dev_err(&purb->dev->dev, "no context!\n"); return; } dev = pipe_info->dev; if (dev == NULL) { dev_err(&purb->dev->dev, "no context!\n"); return; } status = purb->status; /* if shutting down, do not resubmit, exit immediately */ if (status == -ESHUTDOWN) { dprintk(dev, 2, "%s: err shutdown\n", __func__); pipe_info->err_count++; return; } if (pipe_info->state == 0) { dprintk(dev, 2, "%s: exiting USB pipe", __func__); return; } if (status == 0) s2255_read_video_callback(dev, pipe_info); else { pipe_info->err_count++; dprintk(dev, 1, "%s: failed URB %d\n", __func__, status); } pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint); /* reuse urb */ usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev, 
pipe, pipe_info->transfer_buffer, pipe_info->cur_transfer_size, read_pipe_completion, pipe_info); if (pipe_info->state != 0) { if (usb_submit_urb(pipe_info->stream_urb, GFP_ATOMIC)) dev_err(&dev->udev->dev, "error submitting urb\n"); } else { dprintk(dev, 2, "%s :complete state 0\n", __func__); } return; } static int s2255_start_readpipe(struct s2255_dev *dev) { int pipe; int retval; struct s2255_pipeinfo *pipe_info = &dev->pipe; pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint); dprintk(dev, 2, "%s: IN %d\n", __func__, dev->read_endpoint); pipe_info->state = 1; pipe_info->err_count = 0; pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pipe_info->stream_urb) return -ENOMEM; /* transfer buffer allocated in board_init */ usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev, pipe, pipe_info->transfer_buffer, pipe_info->cur_transfer_size, read_pipe_completion, pipe_info); retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL); if (retval) { pr_err("s2255: start read pipe failed\n"); return retval; } return 0; } /* starts acquisition process */ static int s2255_start_acquire(struct s2255_vc *vc) { int res; unsigned long chn_rev; int j; struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev); __le32 *buffer = dev->cmdbuf; mutex_lock(&dev->cmdlock); chn_rev = G_chnmap[vc->idx]; vc->last_frame = -1; vc->bad_payload = 0; vc->cur_frame = 0; for (j = 0; j < SYS_FRAMES; j++) { vc->buffer.frame[j].ulState = 0; vc->buffer.frame[j].cur_size = 0; } /* send the start command */ buffer[0] = IN_DATA_TOKEN; buffer[1] = (__le32) cpu_to_le32(chn_rev); buffer[2] = CMD_START; res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512); if (res != 0) dev_err(&dev->udev->dev, "CMD_START error\n"); dprintk(dev, 2, "start acquire exit[%d] %d\n", vc->idx, res); mutex_unlock(&dev->cmdlock); return res; } static int s2255_stop_acquire(struct s2255_vc *vc) { int res; unsigned long chn_rev; struct s2255_dev *dev = to_s2255_dev(vc->vdev.v4l2_dev); __le32 *buffer = dev->cmdbuf; mutex_lock(&dev->cmdlock); chn_rev = G_chnmap[vc->idx]; /* send the stop command */ buffer[0] = IN_DATA_TOKEN; buffer[1] = (__le32) cpu_to_le32(chn_rev); buffer[2] = CMD_STOP; res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512); if (res != 0) dev_err(&dev->udev->dev, "CMD_STOP error\n"); dprintk(dev, 4, "%s: chn %d, res %d\n", __func__, vc->idx, res); mutex_unlock(&dev->cmdlock); return res; } static void s2255_stop_readpipe(struct s2255_dev *dev) { struct s2255_pipeinfo *pipe = &dev->pipe; pipe->state = 0; if (pipe->stream_urb) { /* cancel urb */ usb_kill_urb(pipe->stream_urb); usb_free_urb(pipe->stream_urb); pipe->stream_urb = NULL; } dprintk(dev, 4, "%s", __func__); return; } static void s2255_fwload_start(struct s2255_dev *dev) { s2255_reset_dsppower(dev); dev->fw_data->fw_size = dev->fw_data->fw->size; atomic_set(&dev->fw_data->fw_state, S2255_FW_NOTLOADED); memcpy(dev->fw_data->pfw_data, dev->fw_data->fw->data, CHUNK_SIZE); dev->fw_data->fw_loaded = CHUNK_SIZE; usb_fill_bulk_urb(dev->fw_data->fw_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), dev->fw_data->pfw_data, CHUNK_SIZE, s2255_fwchunk_complete, dev->fw_data); mod_timer(&dev->timer, jiffies + HZ); } /* standard usb probe function */ static int s2255_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct s2255_dev *dev = NULL; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int i; int retval = -ENOMEM; __le32 *pdata; int fw_size; /* allocate memory for our device state and initialize it to zero */ 
dev = kzalloc(sizeof(struct s2255_dev), GFP_KERNEL); if (dev == NULL) { s2255_dev_err(&interface->dev, "out of memory\n"); return -ENOMEM; } dev->cmdbuf = kzalloc(S2255_CMDBUF_SIZE, GFP_KERNEL); if (dev->cmdbuf == NULL) { s2255_dev_err(&interface->dev, "out of memory\n"); goto errorFWDATA1; } refcount_set(&dev->num_channels, 0); dev->pid = id->idProduct; dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL); if (!dev->fw_data) goto errorFWDATA1; mutex_init(&dev->lock); mutex_init(&dev->cmdlock); /* grab usb_device and save it */ dev->udev = usb_get_dev(interface_to_usbdev(interface)); if (dev->udev == NULL) { dev_err(&interface->dev, "null usb device\n"); retval = -ENODEV; goto errorUDEV; } dev_dbg(&interface->dev, "dev: %p, udev %p interface %p\n", dev, dev->udev, interface); dev->interface = interface; /* set up the endpoint information */ iface_desc = interface->cur_altsetting; dev_dbg(&interface->dev, "num EP: %d\n", iface_desc->desc.bNumEndpoints); for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!dev->read_endpoint && usb_endpoint_is_bulk_in(endpoint)) { /* we found the bulk in endpoint */ dev->read_endpoint = endpoint->bEndpointAddress; } } if (!dev->read_endpoint) { dev_err(&interface->dev, "Could not find bulk-in endpoint\n"); goto errorEP; } timer_setup(&dev->timer, s2255_timer, 0); init_waitqueue_head(&dev->fw_data->wait_fw); for (i = 0; i < MAX_CHANNELS; i++) { struct s2255_vc *vc = &dev->vc[i]; vc->idx = i; vc->dev = dev; init_waitqueue_head(&vc->wait_setmode); init_waitqueue_head(&vc->wait_vidstatus); spin_lock_init(&vc->qlock); mutex_init(&vc->vb_lock); } dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->fw_data->fw_urb) goto errorFWURB; dev->fw_data->pfw_data = kzalloc(CHUNK_SIZE, GFP_KERNEL); if (!dev->fw_data->pfw_data) { dev_err(&interface->dev, "out of memory!\n"); goto errorFWDATA2; } /* load the first chunk */ if (request_firmware(&dev->fw_data->fw, FIRMWARE_FILE_NAME, &dev->udev->dev)) { dev_err(&interface->dev, "sensoray 2255 failed to get firmware\n"); goto errorREQFW; } /* check the firmware is valid */ fw_size = dev->fw_data->fw->size; pdata = (__le32 *) &dev->fw_data->fw->data[fw_size - 8]; if (*pdata != S2255_FW_MARKER) { dev_err(&interface->dev, "Firmware invalid.\n"); retval = -ENODEV; goto errorFWMARKER; } else { /* make sure firmware is the latest */ __le32 *pRel; pRel = (__le32 *) &dev->fw_data->fw->data[fw_size - 4]; pr_info("s2255 dsp fw version %x\n", le32_to_cpu(*pRel)); dev->dsp_fw_ver = le32_to_cpu(*pRel); if (dev->dsp_fw_ver < S2255_CUR_DSP_FWVER) pr_info("s2255: f2255usb.bin out of date.\n"); if (dev->pid == 0x2257 && dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER) pr_warn("2257 needs firmware %d or above.\n", S2255_MIN_DSP_COLORFILTER); } usb_reset_device(dev->udev); /* load 2255 board specific */ retval = s2255_board_init(dev); if (retval) goto errorBOARDINIT; s2255_fwload_start(dev); /* loads v4l specific */ retval = s2255_probe_v4l(dev); if (retval) goto errorBOARDINIT; dev_info(&interface->dev, "Sensoray 2255 detected\n"); return 0; errorBOARDINIT: s2255_board_shutdown(dev); errorFWMARKER: release_firmware(dev->fw_data->fw); errorREQFW: kfree(dev->fw_data->pfw_data); errorFWDATA2: usb_free_urb(dev->fw_data->fw_urb); errorFWURB: timer_shutdown_sync(&dev->timer); errorEP: usb_put_dev(dev->udev); errorUDEV: kfree(dev->fw_data); mutex_destroy(&dev->lock); errorFWDATA1: kfree(dev->cmdbuf); kfree(dev); pr_warn("Sensoray 2255 driver load failed: 0x%x\n", retval); return retval; } /* 
disconnect routine. when board is removed physically or with rmmod */ static void s2255_disconnect(struct usb_interface *interface) { struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface)); int i; int channels = refcount_read(&dev->num_channels); mutex_lock(&dev->lock); v4l2_device_disconnect(&dev->v4l2_dev); mutex_unlock(&dev->lock); /*see comments in the uvc_driver.c usb disconnect function */ refcount_inc(&dev->num_channels); /* unregister each video device. */ for (i = 0; i < channels; i++) video_unregister_device(&dev->vc[i].vdev); /* wake up any of our timers */ atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING); wake_up(&dev->fw_data->wait_fw); for (i = 0; i < MAX_CHANNELS; i++) { dev->vc[i].setmode_ready = 1; wake_up(&dev->vc[i].wait_setmode); dev->vc[i].vidstatus_ready = 1; wake_up(&dev->vc[i].wait_vidstatus); } if (refcount_dec_and_test(&dev->num_channels)) s2255_destroy(dev); dev_info(&interface->dev, "%s\n", __func__); } static struct usb_driver s2255_driver = { .name = S2255_DRIVER_NAME, .probe = s2255_probe, .disconnect = s2255_disconnect, .id_table = s2255_table, }; module_usb_driver(s2255_driver); MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver"); MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)"); MODULE_LICENSE("GPL"); MODULE_VERSION(S2255_VERSION); MODULE_FIRMWARE(FIRMWARE_FILE_NAME);
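/*
 * Illustrative sketch (not part of the driver): the comment above
 * save_frame() documents the bulk-stream framing - bytes 0-3 carry the
 * S2255_MARKER_FRAME magic, bytes 4-7 the channel, bytes 8-11 the payload
 * size, frame data after that, all header fields little-endian as the code
 * notes. A minimal standalone host-side parse consistent with that layout
 * follows; frame_hdr and parse_frame_hdr are hypothetical names.
 */
#include <stdint.h>
#include <string.h>
#include <endian.h>

#define MARKER_FRAME 0x2255DA4AUL		/* S2255_MARKER_FRAME */

struct frame_hdr {				/* host-side view, 12 bytes */
	uint32_t marker;			/* bytes 0-3 */
	uint32_t channel;			/* bytes 4-7: 0-3 */
	uint32_t payload;			/* bytes 8-11: frame size */
};

static int parse_frame_hdr(const uint8_t *buf, size_t len,
			   struct frame_hdr *h)
{
	if (len < sizeof(*h))
		return -1;
	memcpy(h, buf, sizeof(*h));		/* avoid unaligned loads */
	h->marker  = le32toh(h->marker);	/* DSP data is little-endian */
	h->channel = le32toh(h->channel);
	h->payload = le32toh(h->payload);
	return h->marker == MARKER_FRAME ? 0 : -1;
}

/*
 * Illustrative sketch: the mask_mult arithmetic in get_transfer_size()
 * rounds the transfer size up to the next multiple of DEF_USB_BLOCK
 * (assumed, as in the driver, to be a power of two). mask_mult equals
 * ~(DEF_USB_BLOCK - 1), the low bits flag a partial block, and an
 * unaligned size is padded to the next block boundary. round_up_block is
 * a hypothetical name for the equivalent computation.
 */
static uint32_t round_up_block(uint32_t x, uint32_t blk)
{
	uint32_t mask = ~(blk - 1);	/* same as 0xffffffffUL - blk + 1 */

	if (x & ~mask)			/* size not block-aligned */
		x = (x & mask) + blk;	/* pad up to the next block */
	return x;
}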
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Network event notifiers
 *
 *	Authors:
 *	Tom Tucker <tom@opengridcomputing.com>
 *	Steve Wise <swise@opengridcomputing.com>
 *
 *	Fixes:
 */

#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <net/netevent.h>

static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);

/**
 * register_netevent_notifier - register a netevent notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when a netevent occurs.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 */
int register_netevent_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netevent_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_netevent_notifier);

/**
 * unregister_netevent_notifier - unregister a netevent notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netevent_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 */
int unregister_netevent_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_netevent_notifier);

/**
 * call_netevent_notifiers - call all netevent notifier blocks
 * @val: value passed unmodified to notifier function
 * @v: pointer passed unmodified to notifier function
 *
 * Call all netevent notifier blocks. Parameters and return value
 * are as for notifier_call_chain().
 */
int call_netevent_notifiers(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
}
EXPORT_SYMBOL_GPL(call_netevent_notifiers);
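/*
 * Illustrative usage sketch (not part of this file): a consumer registers
 * a struct notifier_block whose callback runs atomically for each event
 * reported on the chain. All my_* names below are hypothetical; the event
 * constants come from <net/netevent.h>.
 */
#include <linux/module.h>
#include <linux/notifier.h>
#include <net/netevent.h>

/* Hypothetical consumer: observes neighbour updates on the chain. */
static int my_netevent_cb(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	if (event == NETEVENT_NEIGH_UPDATE) {
		/* ptr is the updated struct neighbour for this event type */
	}
	return NOTIFY_DONE;	/* observe only, never veto */
}

static struct notifier_block my_netevent_nb = {
	.notifier_call	= my_netevent_cb,
};

static int __init my_netevent_init(void)
{
	return register_netevent_notifier(&my_netevent_nb);
}

static void __exit my_netevent_exit(void)
{
	unregister_netevent_notifier(&my_netevent_nb);
}

module_init(my_netevent_init);
module_exit(my_netevent_exit);
MODULE_DESCRIPTION("Hypothetical netevent consumer sketch");
MODULE_LICENSE("GPL");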
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <linux/kernel.h>
#include <linux/uuid.h>
#include <linux/fs.h>
#include <linux/fsverity.h>
#include <linux/namei.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include "ovl_entry.h"

#undef pr_fmt
#define pr_fmt(fmt) "overlayfs: " fmt

enum ovl_path_type {
	__OVL_PATH_UPPER	= (1 << 0),
	__OVL_PATH_MERGE	= (1 << 1),
	__OVL_PATH_ORIGIN	= (1 << 2),
};

#define OVL_TYPE_UPPER(type)	((type) & __OVL_PATH_UPPER)
#define OVL_TYPE_MERGE(type)	((type) & __OVL_PATH_MERGE)
#define OVL_TYPE_ORIGIN(type)	((type) & __OVL_PATH_ORIGIN)

#define OVL_XATTR_NAMESPACE "overlay."
#define OVL_XATTR_TRUSTED_PREFIX XATTR_TRUSTED_PREFIX OVL_XATTR_NAMESPACE
#define OVL_XATTR_TRUSTED_PREFIX_LEN (sizeof(OVL_XATTR_TRUSTED_PREFIX) - 1)
#define OVL_XATTR_USER_PREFIX XATTR_USER_PREFIX OVL_XATTR_NAMESPACE
#define OVL_XATTR_USER_PREFIX_LEN (sizeof(OVL_XATTR_USER_PREFIX) - 1)
#define OVL_XATTR_ESCAPE_PREFIX OVL_XATTR_NAMESPACE
#define OVL_XATTR_ESCAPE_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_PREFIX) - 1)
#define OVL_XATTR_ESCAPE_TRUSTED_PREFIX OVL_XATTR_TRUSTED_PREFIX OVL_XATTR_ESCAPE_PREFIX
#define OVL_XATTR_ESCAPE_TRUSTED_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_TRUSTED_PREFIX) - 1)
#define OVL_XATTR_ESCAPE_USER_PREFIX OVL_XATTR_USER_PREFIX OVL_XATTR_ESCAPE_PREFIX
#define OVL_XATTR_ESCAPE_USER_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_USER_PREFIX) - 1)

enum ovl_xattr {
	OVL_XATTR_OPAQUE,
	OVL_XATTR_REDIRECT,
	OVL_XATTR_ORIGIN,
	OVL_XATTR_IMPURE,
	OVL_XATTR_NLINK,
	OVL_XATTR_UPPER,
	OVL_XATTR_UUID,
	OVL_XATTR_METACOPY,
	OVL_XATTR_PROTATTR,
	OVL_XATTR_XWHITEOUT,
};

enum ovl_inode_flag {
	/* Pure upper dir that may contain non pure upper entries */
	OVL_IMPURE,
	/* Non-merge dir that may contain whiteout entries */
	OVL_WHITEOUTS,
	OVL_INDEX,
	OVL_UPPERDATA,
	/* Inode number will remain constant over copy up. */
	OVL_CONST_INO,
	OVL_HAS_DIGEST,
	OVL_VERIFIED_DIGEST,
};

enum ovl_entry_flag {
	OVL_E_UPPER_ALIAS,
	OVL_E_OPAQUE,
	OVL_E_CONNECTED,
	/* Lower stack may contain xwhiteout entries */
	OVL_E_XWHITEOUTS,
};

enum {
	OVL_REDIRECT_OFF,	/* "off" mode is never used. In effect */
	OVL_REDIRECT_FOLLOW,	/* ...it translates to either "follow" */
	OVL_REDIRECT_NOFOLLOW,	/* ...or "nofollow". */
	OVL_REDIRECT_ON,
};

enum {
	OVL_UUID_OFF,
	OVL_UUID_NULL,
	OVL_UUID_AUTO,
	OVL_UUID_ON,
};

enum {
	OVL_XINO_OFF,
	OVL_XINO_AUTO,
	OVL_XINO_ON,
};

enum {
	OVL_VERITY_OFF,
	OVL_VERITY_ON,
	OVL_VERITY_REQUIRE,
};

/*
 * The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
 * where:
 * origin.fh	- exported file handle of the lower file
 * origin.uuid	- uuid of the lower filesystem
 */
#define OVL_FH_VERSION	0
#define OVL_FH_MAGIC	0xfb

/* CPU byte order required for fid decoding: */
#define OVL_FH_FLAG_BIG_ENDIAN	(1 << 0)
#define OVL_FH_FLAG_ANY_ENDIAN	(1 << 1)
/* Is the real inode encoded in fid an upper inode? */
#define OVL_FH_FLAG_PATH_UPPER	(1 << 2)

#define OVL_FH_FLAG_ALL (OVL_FH_FLAG_BIG_ENDIAN | OVL_FH_FLAG_ANY_ENDIAN | \
			 OVL_FH_FLAG_PATH_UPPER)

#if defined(__LITTLE_ENDIAN)
#define OVL_FH_FLAG_CPU_ENDIAN 0
#elif defined(__BIG_ENDIAN)
#define OVL_FH_FLAG_CPU_ENDIAN OVL_FH_FLAG_BIG_ENDIAN
#else
#error Endianness not defined
#endif

/* The type used to be returned by overlay exportfs for misaligned fid */
#define OVL_FILEID_V0	0xfb
/* The type returned by overlay exportfs for 32bit aligned fid */
#define OVL_FILEID_V1	0xf8

/* On-disk format for "origin" file handle */
struct ovl_fb {
	u8 version;	/* 0 */
	u8 magic;	/* 0xfb */
	u8 len;		/* size of this header + size of fid */
	u8 flags;	/* OVL_FH_FLAG_* */
	u8 type;	/* fid_type of fid */
	uuid_t uuid;	/* uuid of filesystem */
	u32 fid[];	/* file identifier should be 32bit aligned in-memory */
} __packed;

/* In-memory and on-wire format for overlay file handle */
struct ovl_fh {
	u8 padding[3];	/* make sure fb.fid is 32bit aligned */
	union {
		struct ovl_fb fb;
		DECLARE_FLEX_ARRAY(u8, buf);
	};
} __packed;

#define OVL_FH_WIRE_OFFSET	offsetof(struct ovl_fh, fb)
#define OVL_FH_LEN(fh)		(OVL_FH_WIRE_OFFSET + (fh)->fb.len)
#define OVL_FH_FID_OFFSET	(OVL_FH_WIRE_OFFSET + \
				 offsetof(struct ovl_fb, fid))

/* On-disk format for "metacopy" xattr (if non-zero size) */
struct ovl_metacopy {
	u8 version;	/* 0 */
	u8 len;		/* size of this header + used digest bytes */
	u8 flags;
	u8 digest_algo;	/* FS_VERITY_HASH_ALG_* constant, 0 for no digest */
	u8 digest[FS_VERITY_MAX_DIGEST_SIZE]; /* Only the used part on disk */
} __packed;

#define OVL_METACOPY_MAX_SIZE (sizeof(struct ovl_metacopy))
#define OVL_METACOPY_MIN_SIZE (OVL_METACOPY_MAX_SIZE - FS_VERITY_MAX_DIGEST_SIZE)
#define OVL_METACOPY_INIT { 0, OVL_METACOPY_MIN_SIZE }

static inline int ovl_metadata_digest_size(const struct ovl_metacopy *metacopy)
{
	if (metacopy->len < OVL_METACOPY_MIN_SIZE)
		return 0;
	return (int)metacopy->len - OVL_METACOPY_MIN_SIZE;
}

/* No atime modification on underlying */
#define OVL_OPEN_FLAGS (O_NOATIME)

extern const char *const ovl_xattr_table[][2];

static inline const char *ovl_xattr(struct ovl_fs *ofs, enum ovl_xattr ox)
{
	return ovl_xattr_table[ox][ofs->config.userxattr];
}

/*
 * When changing ownership of an upper object map the intended ownership
 * according to the upper layer's idmapping. When an upper mount idmaps files
 * that are stored on-disk as owned by id 1001 to id 1000 this means stat on
 * this object will report it as being owned by id 1000 when calling stat via
 * the upper mount.
 * In order to change ownership of an object so stat reports id 1000 when
 * called on an idmapped upper mount the value written to disk - i.e., the
 * value stored in ia_*id - must be 1001. The mount mapping helper will thus
 * take care to map 1000 to 1001.
 * The mnt idmapping helpers are nops if the upper layer isn't idmapped.
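 *
 * A worked restatement of the example above (same illustrative ids): with
 * an upper mount that idmaps on-disk id 1001 to id 1000, chown()ing an
 * overlay file to id 1000 must write 1001 into ia_uid on disk; stat()
 * through the idmapped upper mount then reports id 1000 again.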
*/ static inline int ovl_do_notify_change(struct ovl_fs *ofs, struct dentry *upperdentry, struct iattr *attr) { return notify_change(ovl_upper_mnt_idmap(ofs), upperdentry, attr, NULL); } static inline int ovl_do_rmdir(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry) { int err = vfs_rmdir(ovl_upper_mnt_idmap(ofs), dir, dentry); pr_debug("rmdir(%pd2) = %i\n", dentry, err); return err; } static inline int ovl_do_unlink(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry) { int err = vfs_unlink(ovl_upper_mnt_idmap(ofs), dir, dentry, NULL); pr_debug("unlink(%pd2) = %i\n", dentry, err); return err; } static inline int ovl_do_link(struct ovl_fs *ofs, struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { int err = vfs_link(old_dentry, ovl_upper_mnt_idmap(ofs), dir, new_dentry, NULL); pr_debug("link(%pd2, %pd2) = %i\n", old_dentry, new_dentry, err); return err; } static inline int ovl_do_create(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry, umode_t mode) { int err = vfs_create(ovl_upper_mnt_idmap(ofs), dir, dentry, mode, true); pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err); return err; } static inline struct dentry *ovl_do_mkdir(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry, umode_t mode) { struct dentry *ret; ret = vfs_mkdir(ovl_upper_mnt_idmap(ofs), dir, dentry, mode); pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, PTR_ERR_OR_ZERO(ret)); return ret; } static inline int ovl_do_mknod(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { int err = vfs_mknod(ovl_upper_mnt_idmap(ofs), dir, dentry, mode, dev); pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n", dentry, mode, dev, err); return err; } static inline int ovl_do_symlink(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry, const char *oldname) { int err = vfs_symlink(ovl_upper_mnt_idmap(ofs), dir, dentry, oldname); pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err); return err; } static inline ssize_t ovl_do_getxattr(const struct path *path, const char *name, void *value, size_t size) { int err, len; WARN_ON(path->dentry->d_sb != path->mnt->mnt_sb); err = vfs_getxattr(mnt_idmap(path->mnt), path->dentry, name, value, size); len = (value && err > 0) ? 
err : 0; pr_debug("getxattr(%pd2, \"%s\", \"%*pE\", %zu, 0) = %i\n", path->dentry, name, min(len, 48), value, size, err); return err; } static inline ssize_t ovl_getxattr_upper(struct ovl_fs *ofs, struct dentry *upperdentry, enum ovl_xattr ox, void *value, size_t size) { struct path upperpath = { .dentry = upperdentry, .mnt = ovl_upper_mnt(ofs), }; return ovl_do_getxattr(&upperpath, ovl_xattr(ofs, ox), value, size); } static inline ssize_t ovl_path_getxattr(struct ovl_fs *ofs, const struct path *path, enum ovl_xattr ox, void *value, size_t size) { return ovl_do_getxattr(path, ovl_xattr(ofs, ox), value, size); } static inline int ovl_do_setxattr(struct ovl_fs *ofs, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { int err = vfs_setxattr(ovl_upper_mnt_idmap(ofs), dentry, name, value, size, flags); pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, %d) = %i\n", dentry, name, min((int)size, 48), value, size, flags, err); return err; } static inline int ovl_setxattr(struct ovl_fs *ofs, struct dentry *dentry, enum ovl_xattr ox, const void *value, size_t size) { return ovl_do_setxattr(ofs, dentry, ovl_xattr(ofs, ox), value, size, 0); } static inline int ovl_do_removexattr(struct ovl_fs *ofs, struct dentry *dentry, const char *name) { int err = vfs_removexattr(ovl_upper_mnt_idmap(ofs), dentry, name); pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err); return err; } static inline int ovl_removexattr(struct ovl_fs *ofs, struct dentry *dentry, enum ovl_xattr ox) { return ovl_do_removexattr(ofs, dentry, ovl_xattr(ofs, ox)); } static inline int ovl_do_set_acl(struct ovl_fs *ofs, struct dentry *dentry, const char *acl_name, struct posix_acl *acl) { return vfs_set_acl(ovl_upper_mnt_idmap(ofs), dentry, acl_name, acl); } static inline int ovl_do_remove_acl(struct ovl_fs *ofs, struct dentry *dentry, const char *acl_name) { return vfs_remove_acl(ovl_upper_mnt_idmap(ofs), dentry, acl_name); } static inline int ovl_do_rename(struct ovl_fs *ofs, struct dentry *olddir, struct dentry *olddentry, struct dentry *newdir, struct dentry *newdentry, unsigned int flags) { int err; struct renamedata rd = { .mnt_idmap = ovl_upper_mnt_idmap(ofs), .old_parent = olddir, .old_dentry = olddentry, .new_parent = newdir, .new_dentry = newdentry, .flags = flags, }; pr_debug("rename(%pd2, %pd2, 0x%x)\n", olddentry, newdentry, flags); err = vfs_rename(&rd); if (err) { pr_debug("...rename(%pd2, %pd2, ...) 
= %i\n", olddentry, newdentry, err); } return err; } static inline int ovl_do_whiteout(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry) { int err = vfs_whiteout(ovl_upper_mnt_idmap(ofs), dir, dentry); pr_debug("whiteout(%pd2) = %i\n", dentry, err); return err; } static inline struct file *ovl_do_tmpfile(struct ovl_fs *ofs, struct dentry *dentry, umode_t mode) { struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = dentry }; struct file *file = kernel_tmpfile_open(ovl_upper_mnt_idmap(ofs), &path, mode, O_LARGEFILE | O_WRONLY, current_cred()); int err = PTR_ERR_OR_ZERO(file); pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err); return file; } static inline struct dentry *ovl_lookup_upper(struct ovl_fs *ofs, const char *name, struct dentry *base, int len) { return lookup_one(ovl_upper_mnt_idmap(ofs), &QSTR_LEN(name, len), base); } static inline struct dentry *ovl_lookup_upper_unlocked(struct ovl_fs *ofs, const char *name, struct dentry *base, int len) { return lookup_one_unlocked(ovl_upper_mnt_idmap(ofs), &QSTR_LEN(name, len), base); } static inline bool ovl_open_flags_need_copy_up(int flags) { if (!flags) return false; return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC)); } /* util.c */ int ovl_parent_lock(struct dentry *parent, struct dentry *child); static inline void ovl_parent_unlock(struct dentry *parent) { inode_unlock(parent->d_inode); } int ovl_get_write_access(struct dentry *dentry); void ovl_put_write_access(struct dentry *dentry); void ovl_start_write(struct dentry *dentry); void ovl_end_write(struct dentry *dentry); int ovl_want_write(struct dentry *dentry); void ovl_drop_write(struct dentry *dentry); struct dentry *ovl_workdir(struct dentry *dentry); const struct cred *ovl_override_creds(struct super_block *sb); void ovl_revert_creds(const struct cred *old_cred); static inline const struct cred *ovl_creds(struct super_block *sb) { return OVL_FS(sb)->creator_cred; } int ovl_can_decode_fh(struct super_block *sb); struct dentry *ovl_indexdir(struct super_block *sb); bool ovl_index_all(struct super_block *sb); bool ovl_verify_lower(struct super_block *sb); struct ovl_path *ovl_stack_alloc(unsigned int n); void ovl_stack_cpy(struct ovl_path *dst, struct ovl_path *src, unsigned int n); void ovl_stack_put(struct ovl_path *stack, unsigned int n); void ovl_stack_free(struct ovl_path *stack, unsigned int n); struct ovl_entry *ovl_alloc_entry(unsigned int numlower); void ovl_free_entry(struct ovl_entry *oe); bool ovl_dentry_remote(struct dentry *dentry); void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry); void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry, struct ovl_entry *oe); void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry, struct ovl_entry *oe, unsigned int mask); bool ovl_dentry_weird(struct dentry *dentry); static inline bool ovl_dentry_casefolded(struct dentry *dentry) { return sb_has_encoding(dentry->d_sb) && IS_CASEFOLDED(d_inode(dentry)); } enum ovl_path_type ovl_path_type(struct dentry *dentry); void ovl_path_upper(struct dentry *dentry, struct path *path); void ovl_path_lower(struct dentry *dentry, struct path *path); void ovl_path_lowerdata(struct dentry *dentry, struct path *path); struct inode *ovl_i_path_real(struct inode *inode, struct path *path); enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path); struct dentry *ovl_dentry_upper(struct dentry *dentry); 
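/*
 * Illustrative sketch (not part of the original header): a typical
 * consumer of the path helpers declared above resolves the real
 * backing dentry before delegating to the underlying filesystem.
 * The helper name ovl_example_real_dentry() is hypothetical.
 */
static inline struct dentry *ovl_example_real_dentry(struct dentry *dentry)
{
	struct path realpath;

	/*
	 * Fills @realpath with the upper path if the dentry has been
	 * copied up, otherwise with the topmost lower path; no extra
	 * reference is taken, the path is borrowed from @dentry.
	 */
	ovl_path_real(dentry, &realpath);
	return realpath.dentry;
}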
struct dentry *ovl_dentry_lower(struct dentry *dentry); struct dentry *ovl_dentry_lowerdata(struct dentry *dentry); int ovl_dentry_set_lowerdata(struct dentry *dentry, struct ovl_path *datapath); const struct ovl_layer *ovl_i_layer_lower(struct inode *inode); const struct ovl_layer *ovl_layer_lower(struct dentry *dentry); struct dentry *ovl_dentry_real(struct dentry *dentry); struct dentry *ovl_i_dentry_upper(struct inode *inode); struct inode *ovl_inode_upper(struct inode *inode); struct inode *ovl_inode_lower(struct inode *inode); struct inode *ovl_inode_lowerdata(struct inode *inode); struct inode *ovl_inode_real(struct inode *inode); struct inode *ovl_inode_realdata(struct inode *inode); const char *ovl_lowerdata_redirect(struct inode *inode); struct ovl_dir_cache *ovl_dir_cache(struct inode *inode); void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache); void ovl_dentry_set_flag(unsigned long flag, struct dentry *dentry); void ovl_dentry_clear_flag(unsigned long flag, struct dentry *dentry); bool ovl_dentry_test_flag(unsigned long flag, struct dentry *dentry); bool ovl_dentry_is_opaque(struct dentry *dentry); bool ovl_dentry_is_whiteout(struct dentry *dentry); void ovl_dentry_set_opaque(struct dentry *dentry); bool ovl_dentry_has_xwhiteouts(struct dentry *dentry); void ovl_dentry_set_xwhiteouts(struct dentry *dentry); void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs, const struct ovl_layer *layer); bool ovl_dentry_has_upper_alias(struct dentry *dentry); void ovl_dentry_set_upper_alias(struct dentry *dentry); bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags); bool ovl_dentry_needs_data_copy_up_locked(struct dentry *dentry, int flags); bool ovl_has_upperdata(struct inode *inode); void ovl_set_upperdata(struct inode *inode); const char *ovl_dentry_get_redirect(struct dentry *dentry); void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect); void ovl_inode_update(struct inode *inode, struct dentry *upperdentry); void ovl_dir_modified(struct dentry *dentry, bool impurity); u64 ovl_inode_version_get(struct inode *inode); bool ovl_is_whiteout(struct dentry *dentry); bool ovl_path_is_whiteout(struct ovl_fs *ofs, const struct path *path); struct file *ovl_path_open(const struct path *path, int flags); int ovl_copy_up_start(struct dentry *dentry, int flags); void ovl_copy_up_end(struct dentry *dentry); bool ovl_already_copied_up(struct dentry *dentry, int flags); char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path, enum ovl_xattr ox); bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path); bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path); bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs, const struct path *upperpath); static inline bool ovl_upper_is_whiteout(struct ovl_fs *ofs, struct dentry *upperdentry) { struct path upperpath = { .dentry = upperdentry, .mnt = ovl_upper_mnt(ofs), }; return ovl_path_is_whiteout(ofs, &upperpath); } static inline bool ovl_check_origin_xattr(struct ovl_fs *ofs, struct dentry *upperdentry) { struct path upperpath = { .dentry = upperdentry, .mnt = ovl_upper_mnt(ofs), }; return ovl_path_check_origin_xattr(ofs, &upperpath); } int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry, enum ovl_xattr ox, const void *value, size_t size, int xerr); int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry); bool ovl_inuse_trylock(struct dentry *dentry); void ovl_inuse_unlock(struct dentry *dentry); bool 
ovl_is_inuse(struct dentry *dentry); bool ovl_need_index(struct dentry *dentry); int ovl_nlink_start(struct dentry *dentry); void ovl_nlink_end(struct dentry *dentry); int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *work, struct dentry *upperdir, struct dentry *upper); int ovl_check_metacopy_xattr(struct ovl_fs *ofs, const struct path *path, struct ovl_metacopy *data); int ovl_set_metacopy_xattr(struct ovl_fs *ofs, struct dentry *d, struct ovl_metacopy *metacopy); bool ovl_is_metacopy_dentry(struct dentry *dentry); char *ovl_get_redirect_xattr(struct ovl_fs *ofs, const struct path *path, int padding); int ovl_ensure_verity_loaded(const struct path *path); int ovl_validate_verity(struct ovl_fs *ofs, const struct path *metapath, const struct path *datapath); int ovl_get_verity_digest(struct ovl_fs *ofs, const struct path *src, struct ovl_metacopy *metacopy); int ovl_sync_status(struct ovl_fs *ofs); static inline void ovl_set_flag(unsigned long flag, struct inode *inode) { set_bit(flag, &OVL_I(inode)->flags); } static inline void ovl_clear_flag(unsigned long flag, struct inode *inode) { clear_bit(flag, &OVL_I(inode)->flags); } static inline bool ovl_test_flag(unsigned long flag, struct inode *inode) { return test_bit(flag, &OVL_I(inode)->flags); } static inline bool ovl_is_impuredir(struct super_block *sb, struct dentry *upperdentry) { struct ovl_fs *ofs = OVL_FS(sb); struct path upperpath = { .dentry = upperdentry, .mnt = ovl_upper_mnt(ofs), }; return ovl_get_dir_xattr_val(ofs, &upperpath, OVL_XATTR_IMPURE) == 'y'; } static inline char ovl_get_opaquedir_val(struct ovl_fs *ofs, const struct path *path) { return ovl_get_dir_xattr_val(ofs, path, OVL_XATTR_OPAQUE); } static inline bool ovl_redirect_follow(struct ovl_fs *ofs) { return ofs->config.redirect_mode != OVL_REDIRECT_NOFOLLOW; } static inline bool ovl_redirect_dir(struct ovl_fs *ofs) { return ofs->config.redirect_mode == OVL_REDIRECT_ON; } static inline bool ovl_origin_uuid(struct ovl_fs *ofs) { return ofs->config.uuid != OVL_UUID_OFF; } static inline bool ovl_has_fsid(struct ovl_fs *ofs) { return ofs->config.uuid == OVL_UUID_ON || ofs->config.uuid == OVL_UUID_AUTO; } /* * With xino=auto, we do best effort to keep all inodes on same st_dev and * d_ino consistent with st_ino. * With xino=on, we do the same effort but we warn if we failed. */ static inline bool ovl_xino_warn(struct ovl_fs *ofs) { return ofs->config.xino == OVL_XINO_ON; } /* * To avoid regressions in existing setups with overlay lower offline changes, * we allow lower changes only if none of the new features are used. */ static inline bool ovl_allow_offline_changes(struct ovl_fs *ofs) { return (!ofs->config.index && !ofs->config.metacopy && !ovl_redirect_dir(ofs) && !ovl_xino_warn(ofs)); } /* All layers on same fs? */ static inline bool ovl_same_fs(struct ovl_fs *ofs) { return ofs->xino_mode == 0; } /* All overlay inodes have same st_dev? */ static inline bool ovl_same_dev(struct ovl_fs *ofs) { return ofs->xino_mode >= 0; } static inline unsigned int ovl_xino_bits(struct ovl_fs *ofs) { return ovl_same_dev(ofs) ? 
ofs->xino_mode : 0; } static inline void ovl_inode_lock(struct inode *inode) { mutex_lock(&OVL_I(inode)->lock); } static inline int ovl_inode_lock_interruptible(struct inode *inode) { return mutex_lock_interruptible(&OVL_I(inode)->lock); } static inline void ovl_inode_unlock(struct inode *inode) { mutex_unlock(&OVL_I(inode)->lock); } /* namei.c */ int ovl_check_fb_len(struct ovl_fb *fb, int fb_len); static inline int ovl_check_fh_len(struct ovl_fh *fh, int fh_len) { if (fh_len < sizeof(struct ovl_fh)) return -EINVAL; return ovl_check_fb_len(&fh->fb, fh_len - OVL_FH_WIRE_OFFSET); } struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh, struct vfsmount *mnt, bool connected); int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected, struct dentry *upperdentry, struct ovl_path **stackp); int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry, enum ovl_xattr ox, const struct ovl_fh *fh, bool is_upper, bool set); int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry, enum ovl_xattr ox, struct dentry *real, bool is_upper, bool set); struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index, bool connected); int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index); int ovl_get_index_name_fh(const struct ovl_fh *fh, struct qstr *name); int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin, struct qstr *name); struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh); struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper, struct dentry *origin, bool verify); int ovl_path_next(int idx, struct dentry *dentry, struct path *path, const struct ovl_layer **layer); int ovl_verify_lowerdata(struct dentry *dentry); struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); bool ovl_lower_positive(struct dentry *dentry); static inline int ovl_verify_origin_fh(struct ovl_fs *ofs, struct dentry *upper, const struct ovl_fh *fh, bool set) { return ovl_verify_set_fh(ofs, upper, OVL_XATTR_ORIGIN, fh, false, set); } static inline int ovl_verify_origin(struct ovl_fs *ofs, struct dentry *upper, struct dentry *origin, bool set) { return ovl_verify_origin_xattr(ofs, upper, OVL_XATTR_ORIGIN, origin, false, set); } static inline int ovl_verify_upper(struct ovl_fs *ofs, struct dentry *index, struct dentry *upper, bool set) { return ovl_verify_origin_xattr(ofs, index, OVL_XATTR_UPPER, upper, true, set); } /* readdir.c */ extern const struct file_operations ovl_dir_operations; struct file *ovl_dir_real_file(const struct file *file, bool want_upper); int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list); void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper, struct list_head *list); void ovl_cache_free(struct list_head *list); void ovl_dir_cache_free(struct inode *inode); int ovl_check_d_type_supported(const struct path *realpath); int ovl_workdir_cleanup(struct ovl_fs *ofs, struct dentry *parent, struct vfsmount *mnt, struct dentry *dentry, int level); int ovl_indexdir_cleanup(struct ovl_fs *ofs); /* * Can we iterate real dir directly? * * Non-merge dir may contain whiteouts from a time it was a merge upper, before * lower dir was removed under it and possibly before it was rotated from upper * to lower layer. 
*/ static inline bool ovl_dir_is_real(struct inode *dir) { return !ovl_test_flag(OVL_WHITEOUTS, dir); } /* inode.c */ int ovl_set_nlink_upper(struct dentry *dentry); int ovl_set_nlink_lower(struct dentry *dentry); unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry, struct dentry *upperdentry, unsigned int fallback); int ovl_permission(struct mnt_idmap *idmap, struct inode *inode, int mask); #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *do_ovl_get_acl(struct mnt_idmap *idmap, struct inode *inode, int type, bool rcu, bool noperm); static inline struct posix_acl *ovl_get_inode_acl(struct inode *inode, int type, bool rcu) { return do_ovl_get_acl(&nop_mnt_idmap, inode, type, rcu, true); } static inline struct posix_acl *ovl_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, int type) { return do_ovl_get_acl(idmap, d_inode(dentry), type, false, false); } int ovl_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, struct posix_acl *acl, int type); struct posix_acl *ovl_get_acl_path(const struct path *path, const char *acl_name, bool noperm); #else #define ovl_get_inode_acl NULL #define ovl_get_acl NULL #define ovl_set_acl NULL static inline struct posix_acl *ovl_get_acl_path(const struct path *path, const char *acl_name, bool noperm) { return NULL; } #endif int ovl_update_time(struct inode *inode, int flags); bool ovl_is_private_xattr(struct super_block *sb, const char *name); struct ovl_inode_params { struct inode *newinode; struct dentry *upperdentry; struct ovl_entry *oe; bool index; char *redirect; char *lowerdata_redirect; }; void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip, unsigned long ino, int fsid); struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev); struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real, bool is_upper); bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir); struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir); struct inode *ovl_get_inode(struct super_block *sb, struct ovl_inode_params *oip); void ovl_copyattr(struct inode *to); /* vfs fileattr flags read from overlay.protattr xattr to ovl inode */ #define OVL_PROT_I_FLAGS_MASK (S_APPEND | S_IMMUTABLE) /* vfs fileattr flags copied from real to ovl inode */ #define OVL_FATTR_I_FLAGS_MASK (OVL_PROT_I_FLAGS_MASK | S_SYNC | S_NOATIME) /* vfs inode flags copied from real to ovl inode */ #define OVL_COPY_I_FLAGS_MASK (OVL_FATTR_I_FLAGS_MASK | S_CASEFOLD) /* * fileattr flags copied from lower to upper inode on copy up. * We cannot copy up immutable/append-only flags, because that would prevent * linking temp inode to upper dir, so we store them in xattr instead. 
*/ #define OVL_COPY_FS_FLAGS_MASK (FS_SYNC_FL | FS_NOATIME_FL) #define OVL_COPY_FSX_FLAGS_MASK (FS_XFLAG_SYNC | FS_XFLAG_NOATIME) #define OVL_PROT_FS_FLAGS_MASK (FS_APPEND_FL | FS_IMMUTABLE_FL) #define OVL_PROT_FSX_FLAGS_MASK (FS_XFLAG_APPEND | FS_XFLAG_IMMUTABLE) void ovl_check_protattr(struct inode *inode, struct dentry *upper); int ovl_set_protattr(struct inode *inode, struct dentry *upper, struct file_kattr *fa); static inline void ovl_copyflags(struct inode *from, struct inode *to) { unsigned int mask = OVL_COPY_I_FLAGS_MASK; inode_set_flags(to, from->i_flags & mask, mask); } /* dir.c */ extern const struct inode_operations ovl_dir_inode_operations; int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct dentry *dir, struct dentry *dentry); struct ovl_cattr { dev_t rdev; umode_t mode; const char *link; struct dentry *hardlink; }; #define OVL_CATTR(m) (&(struct ovl_cattr) { .mode = (m) }) struct dentry *ovl_create_real(struct ovl_fs *ofs, struct dentry *parent, struct dentry *newdentry, struct ovl_cattr *attr); int ovl_cleanup(struct ovl_fs *ofs, struct dentry *workdir, struct dentry *dentry); struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir); struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir, struct ovl_cattr *attr); /* file.c */ extern const struct file_operations ovl_file_operations; int ovl_real_fileattr_get(const struct path *realpath, struct file_kattr *fa); int ovl_real_fileattr_set(const struct path *realpath, struct file_kattr *fa); int ovl_fileattr_get(struct dentry *dentry, struct file_kattr *fa); int ovl_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct file_kattr *fa); struct ovl_file; struct ovl_file *ovl_file_alloc(struct file *realfile); void ovl_file_free(struct ovl_file *of); /* copy_up.c */ int ovl_copy_up(struct dentry *dentry); int ovl_copy_up_with_data(struct dentry *dentry); int ovl_maybe_copy_up(struct dentry *dentry, int flags); int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentry *new); int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat); struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode, bool is_upper); struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin); int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh, struct dentry *upper); /* export.c */ extern const struct export_operations ovl_export_operations; extern const struct export_operations ovl_export_fid_operations; /* super.c */ int ovl_fill_super(struct super_block *sb, struct fs_context *fc); /* Will this overlay be forced to mount/remount ro? */ static inline bool ovl_force_readonly(struct ovl_fs *ofs) { return (!ovl_upper_mnt(ofs) || !ofs->workdir); } /* xattr.c */ const struct xattr_handler * const *ovl_xattr_handlers(struct ovl_fs *ofs); int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr); int ovl_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags); ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
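/*
 * Illustrative sketch (not part of the original header): how dir.c
 * callers are expected to combine the OVL_CATTR() helper with
 * ovl_create_real() to create an object on the upper layer.
 * ovl_example_mkdir_real() is a hypothetical wrapper; locking of the
 * upper parent and error handling are elided.
 */
static inline struct dentry *ovl_example_mkdir_real(struct ovl_fs *ofs,
						    struct dentry *parent,
						    struct dentry *newdentry,
						    umode_t mode)
{
	/* OVL_CATTR() builds an on-stack struct ovl_cattr with only .mode set */
	return ovl_create_real(ofs, parent, newdentry,
			       OVL_CATTR(S_IFDIR | mode));
}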
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __X86_KERNEL_FPU_CONTEXT_H #define __X86_KERNEL_FPU_CONTEXT_H #include <asm/fpu/xstate.h> #include <asm/trace/fpu.h> /* Functions related to FPU context tracking */ /* * The in-register FPU state for an FPU context on a CPU is assumed to be * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx * matches the FPU. * * If the FPU register state is valid, the kernel can skip restoring the * FPU state from memory. * * Any code that clobbers the FPU registers or updates the in-memory * FPU state for a task MUST let the rest of the kernel know that the * FPU registers are no longer valid for this task. * * Invalidate a resource you control: CPU if using the CPU for something else * (with preemption disabled), FPU for the current task, or a task that * is prevented from running by the current task. */ static inline void __cpu_invalidate_fpregs_state(void) { __this_cpu_write(fpu_fpregs_owner_ctx, NULL); } static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu) { fpu->last_cpu = -1; } static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) { return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; } static inline void fpregs_deactivate(struct fpu *fpu) { __this_cpu_write(fpu_fpregs_owner_ctx, NULL); trace_x86_fpu_regs_deactivated(fpu); } static inline void fpregs_activate(struct fpu *fpu) { __this_cpu_write(fpu_fpregs_owner_ctx, fpu); trace_x86_fpu_regs_activated(fpu); } /* Internal helper for switch_fpu_return() and signal frame setup */ static inline void fpregs_restore_userregs(void) { struct fpu *fpu = x86_task_fpu(current); int cpu = smp_processor_id(); if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER))) return; if (!fpregs_state_valid(fpu, cpu)) { /* * This restores _all_ xstate which has not been * established yet. * * If PKRU is enabled, then the PKRU value is already * correct because it was either set in switch_to() or in * flush_thread(). So it is excluded because it might be * not up to date in current->thread.fpu->xsave state. * * XFD state is handled in restore_fpregs_from_fpstate(). */ restore_fpregs_from_fpstate(fpu->fpstate, XFEATURE_MASK_FPSTATE); fpregs_activate(fpu); fpu->last_cpu = cpu; } clear_thread_flag(TIF_NEED_FPU_LOAD); } #endif
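/*
 * Illustrative sketch (not part of the original header): how the
 * validity check above is typically consumed on a switch-in path.
 * If the in-register state on this CPU still belongs to @fpu, the
 * restore from memory is skipped. fpu_example_switch_in() is a
 * hypothetical caller; it assumes preemption is disabled, as it is
 * on the real context-switch path.
 */
static inline void fpu_example_switch_in(struct fpu *fpu)
{
	int cpu = smp_processor_id();

	if (!fpregs_state_valid(fpu, cpu)) {
		/* Registers are stale: reload from memory and claim ownership */
		restore_fpregs_from_fpstate(fpu->fpstate, XFEATURE_MASK_FPSTATE);
		fpregs_activate(fpu);
		fpu->last_cpu = cpu;
	}
}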
// SPDX-License-Identifier: GPL-2.0-only /* * count the number of connections matching an arbitrary key.
* * (C) 2017 Red Hat GmbH * Author: Florian Westphal <fw@strlen.de> * * split from xt_connlimit.c: * (c) 2000 Gerd Knorr <kraxel@bytesex.org> * Nov 2002: Martin Bene <martin.bene@icomedias.com>: * only ignore TIME_WAIT or gone connections * (C) CC Computer Consultants GmbH, 2007 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/in.h> #include <linux/in6.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/jhash.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/rbtree.h> #include <linux/module.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/netfilter/nf_conntrack_tcp.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_count.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_zones.h> #define CONNCOUNT_SLOTS 256U #define CONNCOUNT_GC_MAX_NODES 8 #define MAX_KEYLEN 5 /* we will save the tuples of all connections we care about */ struct nf_conncount_tuple { struct list_head node; struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; int cpu; u32 jiffies32; }; struct nf_conncount_rb { struct rb_node node; struct nf_conncount_list list; u32 key[MAX_KEYLEN]; struct rcu_head rcu_head; }; static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp; struct nf_conncount_data { unsigned int keylen; struct rb_root root[CONNCOUNT_SLOTS]; struct net *net; struct work_struct gc_work; unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)]; unsigned int gc_tree; }; static u_int32_t conncount_rnd __read_mostly; static struct kmem_cache *conncount_rb_cachep __read_mostly; static struct kmem_cache *conncount_conn_cachep __read_mostly; static inline bool already_closed(const struct nf_conn *conn) { if (nf_ct_protonum(conn) == IPPROTO_TCP) return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT || conn->proto.tcp.state == TCP_CONNTRACK_CLOSE; else return false; } static int key_diff(const u32 *a, const u32 *b, unsigned int klen) { return memcmp(a, b, klen * sizeof(u32)); } static void conn_free(struct nf_conncount_list *list, struct nf_conncount_tuple *conn) { lockdep_assert_held(&list->list_lock); list->count--; list_del(&conn->node); kmem_cache_free(conncount_conn_cachep, conn); } static const struct nf_conntrack_tuple_hash * find_or_evict(struct net *net, struct nf_conncount_list *list, struct nf_conncount_tuple *conn) { const struct nf_conntrack_tuple_hash *found; unsigned long a, b; int cpu = raw_smp_processor_id(); u32 age; found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); if (found) return found; b = conn->jiffies32; a = (u32)jiffies; /* conn might have been added just before by another cpu and * might still be unconfirmed. In this case, nf_conntrack_find() * returns no result. Thus only evict if this cpu added the * stale entry or if the entry is older than two jiffies. 
*/ age = a - b; if (conn->cpu == cpu || age >= 2) { conn_free(list, conn); return ERR_PTR(-ENOENT); } return ERR_PTR(-EAGAIN); } static int __nf_conncount_add(struct net *net, struct nf_conncount_list *list, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { const struct nf_conntrack_tuple_hash *found; struct nf_conncount_tuple *conn, *conn_n; struct nf_conn *found_ct; unsigned int collect = 0; if ((u32)jiffies == list->last_gc) goto add_new_node; /* check the saved connections */ list_for_each_entry_safe(conn, conn_n, &list->head, node) { if (collect > CONNCOUNT_GC_MAX_NODES) break; found = find_or_evict(net, list, conn); if (IS_ERR(found)) { /* Not found, but might be about to be confirmed */ if (PTR_ERR(found) == -EAGAIN) { if (nf_ct_tuple_equal(&conn->tuple, tuple) && nf_ct_zone_id(&conn->zone, conn->zone.dir) == nf_ct_zone_id(zone, zone->dir)) return 0; /* already exists */ } else { collect++; } continue; } found_ct = nf_ct_tuplehash_to_ctrack(found); if (nf_ct_tuple_equal(&conn->tuple, tuple) && nf_ct_zone_equal(found_ct, zone, zone->dir)) { /* * We should not see tuples twice unless someone hooks * this into a table without "-p tcp --syn". * * Attempt to avoid a re-add in this case. */ nf_ct_put(found_ct); return 0; } else if (already_closed(found_ct)) { /* * we do not care about connections which are * closed already -> ditch it */ nf_ct_put(found_ct); conn_free(list, conn); collect++; continue; } nf_ct_put(found_ct); } add_new_node: if (WARN_ON_ONCE(list->count > INT_MAX)) return -EOVERFLOW; conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); if (conn == NULL) return -ENOMEM; conn->tuple = *tuple; conn->zone = *zone; conn->cpu = raw_smp_processor_id(); conn->jiffies32 = (u32)jiffies; list_add_tail(&conn->node, &list->head); list->count++; list->last_gc = (u32)jiffies; return 0; } int nf_conncount_add(struct net *net, struct nf_conncount_list *list, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { int ret; /* check the saved connections */ spin_lock_bh(&list->list_lock); ret = __nf_conncount_add(net, list, tuple, zone); spin_unlock_bh(&list->list_lock); return ret; } EXPORT_SYMBOL_GPL(nf_conncount_add); void nf_conncount_list_init(struct nf_conncount_list *list) { spin_lock_init(&list->list_lock); INIT_LIST_HEAD(&list->head); list->count = 0; list->last_gc = (u32)jiffies; } EXPORT_SYMBOL_GPL(nf_conncount_list_init); /* Return true if the list is empty. Must be called with BH disabled. 
*/ bool nf_conncount_gc_list(struct net *net, struct nf_conncount_list *list) { const struct nf_conntrack_tuple_hash *found; struct nf_conncount_tuple *conn, *conn_n; struct nf_conn *found_ct; unsigned int collected = 0; bool ret = false; /* don't bother if we just did GC */ if ((u32)jiffies == READ_ONCE(list->last_gc)) return false; /* don't bother if other cpu is already doing GC */ if (!spin_trylock(&list->list_lock)) return false; list_for_each_entry_safe(conn, conn_n, &list->head, node) { found = find_or_evict(net, list, conn); if (IS_ERR(found)) { if (PTR_ERR(found) == -ENOENT) collected++; continue; } found_ct = nf_ct_tuplehash_to_ctrack(found); if (already_closed(found_ct)) { /* * we do not care about connections which are * closed already -> ditch it */ nf_ct_put(found_ct); conn_free(list, conn); collected++; continue; } nf_ct_put(found_ct); if (collected > CONNCOUNT_GC_MAX_NODES) break; } if (!list->count) ret = true; list->last_gc = (u32)jiffies; spin_unlock(&list->list_lock); return ret; } EXPORT_SYMBOL_GPL(nf_conncount_gc_list); static void __tree_nodes_free(struct rcu_head *h) { struct nf_conncount_rb *rbconn; rbconn = container_of(h, struct nf_conncount_rb, rcu_head); kmem_cache_free(conncount_rb_cachep, rbconn); } /* caller must hold tree nf_conncount_locks[] lock */ static void tree_nodes_free(struct rb_root *root, struct nf_conncount_rb *gc_nodes[], unsigned int gc_count) { struct nf_conncount_rb *rbconn; while (gc_count) { rbconn = gc_nodes[--gc_count]; spin_lock(&rbconn->list.list_lock); if (!rbconn->list.count) { rb_erase(&rbconn->node, root); call_rcu(&rbconn->rcu_head, __tree_nodes_free); } spin_unlock(&rbconn->list.list_lock); } } static void schedule_gc_worker(struct nf_conncount_data *data, int tree) { set_bit(tree, data->pending_trees); schedule_work(&data->gc_work); } static unsigned int insert_tree(struct net *net, struct nf_conncount_data *data, struct rb_root *root, unsigned int hash, const u32 *key, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES]; struct rb_node **rbnode, *parent; struct nf_conncount_rb *rbconn; struct nf_conncount_tuple *conn; unsigned int count = 0, gc_count = 0; bool do_gc = true; spin_lock_bh(&nf_conncount_locks[hash]); restart: parent = NULL; rbnode = &(root->rb_node); while (*rbnode) { int diff; rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node); parent = *rbnode; diff = key_diff(key, rbconn->key, data->keylen); if (diff < 0) { rbnode = &((*rbnode)->rb_left); } else if (diff > 0) { rbnode = &((*rbnode)->rb_right); } else { int ret; ret = nf_conncount_add(net, &rbconn->list, tuple, zone); if (ret) count = 0; /* hotdrop */ else count = rbconn->list.count; tree_nodes