Total coverage: 383040 of 2013489 (20%)
// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi/drivers/comedi_test.c
 *
 * Generates fake waveform signals that can be read through
 * the command interface. It does _not_ read from any board;
 * it just generates deterministic waveforms.
 * Useful for various testing purposes.
 *
 * Copyright (C) 2002 Joachim Wuttke <Joachim.Wuttke@icn.siemens.de>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
 */

/*
 * Driver: comedi_test
 * Description: generates fake waveforms
 * Author: Joachim Wuttke <Joachim.Wuttke@icn.siemens.de>, Frank Mori Hess
 *   <fmhess@users.sourceforge.net>, ds
 * Devices:
 * Status: works
 * Updated: Sat, 16 Mar 2002 17:34:48 -0800
 *
 * This driver is mainly for testing purposes, but can also be used to
 * generate sample waveforms on systems that don't have data acquisition
 * hardware.
 *
 * Auto-configuration is the default mode if no parameter is supplied during
 * module loading. Manual configuration requires the COMEDI userspace tool.
 * To disable auto-configuration mode, pass the "noauto=1" parameter during
 * module loading. Refer to modinfo or the MODULE_PARM_DESC descriptions
 * below for details.
 *
 * Auto-configuration options:
 *   Refer to modinfo or the MODULE_PARM_DESC descriptions below for details.
 *
 * Manual configuration options:
 *   [0] - Amplitude in microvolts for fake waveforms (default 1 volt)
 *   [1] - Period in microseconds for fake waveforms (default 0.1 sec)
 *
 * Generates a sawtooth wave on channel 0 and a square wave on channel 1;
 * additional waveforms could be added to other channels (currently they
 * return a flatline at zero volts).
 */

#include <linux/module.h>
#include <linux/comedi/comedidev.h>
#include <asm/div64.h>
#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/jiffies.h>
#include <linux/device.h>
#include <linux/kdev_t.h>

#define N_CHANS		8
#define DEV_NAME	"comedi_testd"
#define CLASS_NAME	"comedi_test"

static bool config_mode;
static unsigned int set_amplitude;
static unsigned int set_period;
static const struct class ctcls = {
	.name = CLASS_NAME,
};
static struct device *ctdev;

module_param_named(noauto, config_mode, bool, 0444);
MODULE_PARM_DESC(noauto, "Disable auto-configuration: (1=disable [defaults to enable])");

module_param_named(amplitude, set_amplitude, uint, 0444);
MODULE_PARM_DESC(amplitude, "Set auto mode wave amplitude in microvolts: (defaults to 1 volt)");

module_param_named(period, set_period, uint, 0444);
MODULE_PARM_DESC(period, "Set auto mode wave period in microseconds: (defaults to 0.1 sec)");

/* Data unique to this driver */
struct waveform_private {
	struct timer_list ai_timer;	/* timer for AI commands */
	u64 ai_convert_time;		/* time of next AI conversion in usec */
	unsigned int wf_amplitude;	/* waveform amplitude in microvolts */
	unsigned int wf_period;		/* waveform period in microseconds */
	unsigned int wf_current;	/* current time in waveform period */
	unsigned int ai_scan_period;	/* AI scan period in usec */
	unsigned int ai_convert_period;	/* AI conversion period in usec */
	struct timer_list ao_timer;	/* timer for AO commands */
	struct comedi_device *dev;	/* parent comedi device */
	u64 ao_last_scan_time;		/* time of previous AO scan in usec */
	unsigned int ao_scan_period;	/* AO scan period in usec */
	bool ai_timer_enable:1;		/* should AI timer be running? */
	bool ao_timer_enable:1;		/* should AO timer be running? */
	unsigned short ao_loopbacks[N_CHANS];
};

/* fake analog input ranges */
static const struct comedi_lrange waveform_ai_ranges = {
	2, {
		BIP_RANGE(10),
		BIP_RANGE(5)
	}
};

static unsigned short fake_sawtooth(struct comedi_device *dev,
				    unsigned int range_index,
				    unsigned int current_time)
{
	struct waveform_private *devpriv = dev->private;
	struct comedi_subdevice *s = dev->read_subdev;
	unsigned int offset = s->maxdata / 2;
	u64 value;
	const struct comedi_krange *krange =
		&s->range_table->range[range_index];
	u64 binary_amplitude;

	binary_amplitude = s->maxdata;
	binary_amplitude *= devpriv->wf_amplitude;
	do_div(binary_amplitude, krange->max - krange->min);

	value = current_time;
	value *= binary_amplitude * 2;
	do_div(value, devpriv->wf_period);
	value += offset;
	/* get rid of sawtooth's dc offset and clamp value */
	if (value < binary_amplitude) {
		value = 0;			/* negative saturation */
	} else {
		value -= binary_amplitude;
		if (value > s->maxdata)
			value = s->maxdata;	/* positive saturation */
	}

	return value;
}

static unsigned short fake_squarewave(struct comedi_device *dev,
				      unsigned int range_index,
				      unsigned int current_time)
{
	struct waveform_private *devpriv = dev->private;
	struct comedi_subdevice *s = dev->read_subdev;
	unsigned int offset = s->maxdata / 2;
	u64 value;
	const struct comedi_krange *krange =
		&s->range_table->range[range_index];

	value = s->maxdata;
	value *= devpriv->wf_amplitude;
	do_div(value, krange->max - krange->min);

	/* get one of two values for square-wave and clamp */
	if (current_time < devpriv->wf_period / 2) {
		if (offset < value)
			value = 0;		/* negative saturation */
		else
			value = offset - value;
	} else {
		value += offset;
		if (value > s->maxdata)
			value = s->maxdata;	/* positive saturation */
	}

	return value;
}

static unsigned short fake_flatline(struct comedi_device *dev,
				    unsigned int range_index,
				    unsigned int current_time)
{
	return dev->read_subdev->maxdata / 2;
}

/* generates a different waveform depending on what channel is read */
static unsigned short fake_waveform(struct comedi_device *dev,
				    unsigned int channel, unsigned int range,
				    unsigned int current_time)
{
	enum {
		SAWTOOTH_CHAN,
		SQUARE_CHAN,
	};

	switch (channel) {
	case SAWTOOTH_CHAN:
		return fake_sawtooth(dev, range, current_time);
	case SQUARE_CHAN:
		return fake_squarewave(dev, range, current_time);
	default:
		break;
	}

	return fake_flatline(dev, range, current_time);
}
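The amplitude scaling above is easiest to see with concrete numbers. A minimal standalone sketch of the same arithmetic (editor's illustration, assuming the driver defaults of 1 V amplitude with a 16-bit maxdata, and that the comedi_krange values for the BIP_RANGE(10) range are expressed in microvolts):

/* Editor's sketch, not part of the driver: reproduce the binary
 * amplitude computation of fake_sawtooth() with assumed values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t maxdata = 0xffff;		/* 16-bit sample range */
	uint64_t amplitude_uv = 1000000;	/* 1 V, driver default */
	uint64_t range_uv = 20000000;		/* BIP_RANGE(10): +/-10 V */
	uint64_t binary_amplitude = maxdata * amplitude_uv / range_uv;

	/* prints roughly 3276 counts around the mid-scale offset 32767 */
	printf("amplitude = %llu counts around offset %llu\n",
	       (unsigned long long)binary_amplitude,
	       (unsigned long long)(maxdata / 2));
	return 0;
}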
/*
 * This is the background routine used to generate arbitrary data.
 * It should run in the background; therefore it is scheduled by
 * a timer mechanism.
 */
static void waveform_ai_timer(struct timer_list *t)
{
	struct waveform_private *devpriv =
		timer_container_of(devpriv, t, ai_timer);
	struct comedi_device *dev = devpriv->dev;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	u64 now;
	unsigned int nsamples;
	unsigned int time_increment;

	now = ktime_to_us(ktime_get());
	nsamples = comedi_nsamples_left(s, UINT_MAX);

	while (nsamples && devpriv->ai_convert_time < now) {
		unsigned int chanspec = cmd->chanlist[async->cur_chan];
		unsigned short sample;

		sample = fake_waveform(dev, CR_CHAN(chanspec),
				       CR_RANGE(chanspec),
				       devpriv->wf_current);
		if (comedi_buf_write_samples(s, &sample, 1) == 0)
			goto overrun;
		time_increment = devpriv->ai_convert_period;
		if (async->scan_progress == 0) {
			/* done last conversion in scan, so add dead time */
			time_increment += devpriv->ai_scan_period -
					  devpriv->ai_convert_period *
					  cmd->scan_end_arg;
		}
		devpriv->wf_current += time_increment;
		if (devpriv->wf_current >= devpriv->wf_period)
			devpriv->wf_current %= devpriv->wf_period;
		devpriv->ai_convert_time += time_increment;
		nsamples--;
	}

	if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) {
		async->events |= COMEDI_CB_EOA;
	} else {
		if (devpriv->ai_convert_time > now)
			time_increment = devpriv->ai_convert_time - now;
		else
			time_increment = 1;
		spin_lock(&dev->spinlock);
		if (devpriv->ai_timer_enable) {
			mod_timer(&devpriv->ai_timer,
				  jiffies + usecs_to_jiffies(time_increment));
		}
		spin_unlock(&dev->spinlock);
	}

overrun:
	comedi_handle_events(dev, s);
}

static int waveform_ai_cmdtest(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_cmd *cmd)
{
	int err = 0;
	unsigned int arg, limit;

	/* Step 1 : check if triggers are trivially valid */

	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= comedi_check_trigger_src(&cmd->scan_begin_src,
					TRIG_FOLLOW | TRIG_TIMER);
	err |= comedi_check_trigger_src(&cmd->convert_src,
					TRIG_NOW | TRIG_TIMER);
	err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);

	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */

	err |= comedi_check_trigger_is_unique(cmd->convert_src);
	err |= comedi_check_trigger_is_unique(cmd->stop_src);

	/* Step 2b : and mutually compatible */

	if (cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_NOW)
		err |= -EINVAL;		/* scan period would be 0 */

	if (err)
		return 2;

	/* Step 3: check if arguments are trivially valid */

	err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);

	if (cmd->convert_src == TRIG_NOW) {
		err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
	} else {	/* cmd->convert_src == TRIG_TIMER */
		if (cmd->scan_begin_src == TRIG_FOLLOW) {
			err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
							    NSEC_PER_USEC);
		}
	}

	if (cmd->scan_begin_src == TRIG_FOLLOW) {
		err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
	} else {	/* cmd->scan_begin_src == TRIG_TIMER */
		err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
						    NSEC_PER_USEC);
	}

	err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
					   cmd->chanlist_len);

	if (cmd->stop_src == TRIG_COUNT)
		err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
	else	/* cmd->stop_src == TRIG_NONE */
		err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	if (cmd->convert_src == TRIG_TIMER) {
		/* round convert_arg to nearest microsecond */
		arg = cmd->convert_arg;
		arg = min(arg,
			  rounddown(UINT_MAX, (unsigned int)NSEC_PER_USEC));
		arg = NSEC_PER_USEC * DIV_ROUND_CLOSEST(arg, NSEC_PER_USEC);
		if (cmd->scan_begin_src == TRIG_TIMER) {
			/* limit convert_arg to keep scan_begin_arg in range */
			limit = UINT_MAX / cmd->scan_end_arg;
			limit = rounddown(limit, (unsigned int)NSEC_PER_USEC);
			arg = min(arg, limit);
		}
		err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
	}

	if (cmd->scan_begin_src == TRIG_TIMER) {
		/* round scan_begin_arg to nearest microsecond */
		arg = cmd->scan_begin_arg;
		arg = min(arg,
			  rounddown(UINT_MAX, (unsigned int)NSEC_PER_USEC));
		arg = NSEC_PER_USEC * DIV_ROUND_CLOSEST(arg, NSEC_PER_USEC);
		if (cmd->convert_src == TRIG_TIMER) {
			/* but ensure scan_begin_arg is large enough */
			arg = max(arg, cmd->convert_arg * cmd->scan_end_arg);
		}
		err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
	}

	if (err)
		return 4;

	return 0;
}

static int waveform_ai_cmd(struct comedi_device *dev,
			   struct comedi_subdevice *s)
{
	struct waveform_private *devpriv = dev->private;
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned int first_convert_time;
	u64 wf_current;

	if (cmd->flags & CMDF_PRIORITY) {
		dev_err(dev->class_dev,
			"commands at RT priority not supported in this driver\n");
		return -1;
	}

	if (cmd->convert_src == TRIG_NOW)
		devpriv->ai_convert_period = 0;
	else		/* cmd->convert_src == TRIG_TIMER */
		devpriv->ai_convert_period = cmd->convert_arg / NSEC_PER_USEC;

	if (cmd->scan_begin_src == TRIG_FOLLOW) {
		devpriv->ai_scan_period = devpriv->ai_convert_period *
					  cmd->scan_end_arg;
	} else {	/* cmd->scan_begin_src == TRIG_TIMER */
		devpriv->ai_scan_period = cmd->scan_begin_arg / NSEC_PER_USEC;
	}

	/*
	 * Simulate first conversion to occur at convert period after
	 * conversion timer starts. If scan_begin_src is TRIG_FOLLOW, assume
	 * the conversion timer starts immediately. If scan_begin_src is
	 * TRIG_TIMER, assume the conversion timer starts after the scan
	 * period.
	 */
	first_convert_time = devpriv->ai_convert_period;
	if (cmd->scan_begin_src == TRIG_TIMER)
		first_convert_time += devpriv->ai_scan_period;
	devpriv->ai_convert_time = ktime_to_us(ktime_get()) +
				   first_convert_time;

	/* Determine time within waveform period at time of conversion. */
	wf_current = devpriv->ai_convert_time;
	devpriv->wf_current = do_div(wf_current, devpriv->wf_period);

	/*
	 * Schedule timer to expire just after first conversion time.
	 * Seem to need an extra jiffy here, otherwise timer expires slightly
	 * early!
	 */
	spin_lock_bh(&dev->spinlock);
	devpriv->ai_timer_enable = true;
	devpriv->ai_timer.expires =
		jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
	add_timer(&devpriv->ai_timer);
	spin_unlock_bh(&dev->spinlock);

	return 0;
}

static int waveform_ai_cancel(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct waveform_private *devpriv = dev->private;

	spin_lock_bh(&dev->spinlock);
	devpriv->ai_timer_enable = false;
	spin_unlock_bh(&dev->spinlock);
	if (in_softirq()) {
		/* Assume we were called from the timer routine itself. */
		timer_delete(&devpriv->ai_timer);
	} else {
		timer_delete_sync(&devpriv->ai_timer);
	}
	return 0;
}

static int waveform_ai_insn_read(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	struct waveform_private *devpriv = dev->private;
	int i, chan = CR_CHAN(insn->chanspec);

	for (i = 0; i < insn->n; i++)
		data[i] = devpriv->ao_loopbacks[chan];

	return insn->n;
}
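The cmdtest/cmd pair above implements COMEDI's usual four-step command negotiation. A minimal userspace sketch of driving it through comedilib (editor's illustration; assumes comedilib is installed and the device was auto-configured as /dev/comedi0, with error handling abbreviated):

/* Editor's sketch: stream samples from the fake AI subdevice. */
#include <stdio.h>
#include <unistd.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	comedi_cmd cmd;
	unsigned int chanlist[2] = {
		CR_PACK(0, 0, AREF_GROUND),	/* channel 0: sawtooth */
		CR_PACK(1, 0, AREF_GROUND),	/* channel 1: square wave */
	};
	short buf[1024];
	int n;

	if (!dev)
		return 1;
	/* Ask for a timed command; the driver may adjust the timing. */
	comedi_get_cmd_generic_timed(dev, 0, &cmd, 2, 100000 /* ns */);
	cmd.chanlist = chanlist;
	cmd.chanlist_len = 2;
	cmd.stop_src = TRIG_COUNT;
	cmd.stop_arg = 1000;			/* 1000 scans, then stop */
	if (comedi_command(dev, &cmd) < 0)
		return 1;
	while ((n = read(comedi_fileno(dev), buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* raw 16-bit samples */
	comedi_close(dev);
	return 0;
}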
/*
 * This is the background routine to handle AO commands, scheduled by
 * a timer mechanism.
 */
static void waveform_ao_timer(struct timer_list *t)
{
	struct waveform_private *devpriv =
		timer_container_of(devpriv, t, ao_timer);
	struct comedi_device *dev = devpriv->dev;
	struct comedi_subdevice *s = dev->write_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	u64 now;
	u64 scans_since;
	unsigned int scans_avail = 0;

	/* determine number of scan periods since last time */
	now = ktime_to_us(ktime_get());
	scans_since = now - devpriv->ao_last_scan_time;
	do_div(scans_since, devpriv->ao_scan_period);
	if (scans_since) {
		unsigned int i;

		/* determine scans in buffer, limit to scans to do this time */
		scans_avail = comedi_nscans_left(s, 0);
		if (scans_avail > scans_since)
			scans_avail = scans_since;
		if (scans_avail) {
			/* skip all but the last scan to save processing time */
			if (scans_avail > 1) {
				unsigned int skip_bytes, nbytes;

				skip_bytes =
				comedi_samples_to_bytes(s, cmd->scan_end_arg *
							   (scans_avail - 1));
				nbytes = comedi_buf_read_alloc(s, skip_bytes);
				comedi_buf_read_free(s, nbytes);
				comedi_inc_scan_progress(s, nbytes);
				if (nbytes < skip_bytes) {
					/* unexpected underrun! (cancelled?) */
					async->events |= COMEDI_CB_OVERFLOW;
					goto underrun;
				}
			}
			/* output the last scan */
			for (i = 0; i < cmd->scan_end_arg; i++) {
				unsigned int chan = CR_CHAN(cmd->chanlist[i]);
				unsigned short *pd;

				pd = &devpriv->ao_loopbacks[chan];
				if (!comedi_buf_read_samples(s, pd, 1)) {
					/* unexpected underrun! (cancelled?) */
					async->events |= COMEDI_CB_OVERFLOW;
					goto underrun;
				}
			}
			/* advance time of last scan */
			devpriv->ao_last_scan_time +=
				(u64)scans_avail * devpriv->ao_scan_period;
		}
	}
	if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) {
		async->events |= COMEDI_CB_EOA;
	} else if (scans_avail < scans_since) {
		async->events |= COMEDI_CB_OVERFLOW;
	} else {
		unsigned int time_inc = devpriv->ao_last_scan_time +
					devpriv->ao_scan_period - now;

		spin_lock(&dev->spinlock);
		if (devpriv->ao_timer_enable) {
			mod_timer(&devpriv->ao_timer,
				  jiffies + usecs_to_jiffies(time_inc));
		}
		spin_unlock(&dev->spinlock);
	}

underrun:
	comedi_handle_events(dev, s);
}

static int waveform_ao_inttrig_start(struct comedi_device *dev,
				     struct comedi_subdevice *s,
				     unsigned int trig_num)
{
	struct waveform_private *devpriv = dev->private;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;

	if (trig_num != cmd->start_arg)
		return -EINVAL;

	async->inttrig = NULL;

	devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
	spin_lock_bh(&dev->spinlock);
	devpriv->ao_timer_enable = true;
	devpriv->ao_timer.expires =
		jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
	add_timer(&devpriv->ao_timer);
	spin_unlock_bh(&dev->spinlock);

	return 1;
}

static int waveform_ao_cmdtest(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_cmd *cmd)
{
	int err = 0;
	unsigned int arg;

	/* Step 1 : check if triggers are trivially valid */

	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_INT);
	err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
	err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
	err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);

	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */

	err |= comedi_check_trigger_is_unique(cmd->stop_src);

	/* Step 2b : and mutually compatible */

	if (err)
		return 2;

	/* Step 3: check if arguments are trivially valid */

	err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
					    NSEC_PER_USEC);
	err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
	err |= comedi_check_trigger_arg_min(&cmd->chanlist_len, 1);
	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
					   cmd->chanlist_len);

	if (cmd->stop_src == TRIG_COUNT)
		err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
	else	/* cmd->stop_src == TRIG_NONE */
		err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	/* round scan_begin_arg to nearest microsecond */
	arg = cmd->scan_begin_arg;
	arg = min(arg, rounddown(UINT_MAX, (unsigned int)NSEC_PER_USEC));
	arg = NSEC_PER_USEC * DIV_ROUND_CLOSEST(arg, NSEC_PER_USEC);
	err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);

	if (err)
		return 4;

	return 0;
}

static int waveform_ao_cmd(struct comedi_device *dev,
			   struct comedi_subdevice *s)
{
	struct waveform_private *devpriv = dev->private;
	struct comedi_cmd *cmd = &s->async->cmd;

	if (cmd->flags & CMDF_PRIORITY) {
		dev_err(dev->class_dev,
			"commands at RT priority not supported in this driver\n");
		return -1;
	}

	devpriv->ao_scan_period = cmd->scan_begin_arg / NSEC_PER_USEC;
	s->async->inttrig = waveform_ao_inttrig_start;
	return 0;
}

static int waveform_ao_cancel(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct waveform_private *devpriv = dev->private;

	s->async->inttrig = NULL;
	spin_lock_bh(&dev->spinlock);
	devpriv->ao_timer_enable = false;
	spin_unlock_bh(&dev->spinlock);
	if (in_softirq()) {
		/* Assume we were called from the timer routine itself. */
		timer_delete(&devpriv->ao_timer);
	} else {
		timer_delete_sync(&devpriv->ao_timer);
	}
	return 0;
}

static int waveform_ao_insn_write(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  struct comedi_insn *insn, unsigned int *data)
{
	struct waveform_private *devpriv = dev->private;
	int i, chan = CR_CHAN(insn->chanspec);

	for (i = 0; i < insn->n; i++)
		devpriv->ao_loopbacks[chan] = data[i];

	return insn->n;
}

static int waveform_ai_insn_config(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data)
{
	if (data[0] == INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS) {
		/*
		 * input:  data[1], data[2] : scan_begin_src, convert_src
		 * output: data[1], data[2] : scan_begin_min, convert_min
		 */
		if (data[1] == TRIG_FOLLOW) {
			/* exactly TRIG_FOLLOW case */
			data[1] = 0;
			data[2] = NSEC_PER_USEC;
		} else {
			data[1] = NSEC_PER_USEC;
			if (data[2] & TRIG_TIMER)
				data[2] = NSEC_PER_USEC;
			else
				data[2] = 0;
		}
		return 0;
	}
	return -EINVAL;
}

static int waveform_ao_insn_config(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data)
{
	if (data[0] == INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS) {
		/* we don't care about actual channels */
		data[1] = NSEC_PER_USEC;	/* scan_begin_min */
		data[2] = 0;			/* convert_min */
		return 0;
	}
	return -EINVAL;
}
static int waveform_common_attach(struct comedi_device *dev,
				  int amplitude, int period)
{
	struct waveform_private *devpriv;
	struct comedi_subdevice *s;
	int i;
	int ret;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	devpriv->wf_amplitude = amplitude;
	devpriv->wf_period = period;

	ret = comedi_alloc_subdevices(dev, 2);
	if (ret)
		return ret;

	s = &dev->subdevices[0];
	dev->read_subdev = s;
	/* analog input subdevice */
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
	s->n_chan = N_CHANS;
	s->maxdata = 0xffff;
	s->range_table = &waveform_ai_ranges;
	s->len_chanlist = s->n_chan * 2;
	s->insn_read = waveform_ai_insn_read;
	s->do_cmd = waveform_ai_cmd;
	s->do_cmdtest = waveform_ai_cmdtest;
	s->cancel = waveform_ai_cancel;
	s->insn_config = waveform_ai_insn_config;

	s = &dev->subdevices[1];
	dev->write_subdev = s;
	/* analog output subdevice (loopback) */
	s->type = COMEDI_SUBD_AO;
	s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
	s->n_chan = N_CHANS;
	s->maxdata = 0xffff;
	s->range_table = &waveform_ai_ranges;
	s->len_chanlist = s->n_chan;
	s->insn_write = waveform_ao_insn_write;
	s->insn_read = waveform_ai_insn_read;	/* do same as AI insn_read */
	s->do_cmd = waveform_ao_cmd;
	s->do_cmdtest = waveform_ao_cmdtest;
	s->cancel = waveform_ao_cancel;
	s->insn_config = waveform_ao_insn_config;

	/* Our default loopback value is just a 0V flatline */
	for (i = 0; i < s->n_chan; i++)
		devpriv->ao_loopbacks[i] = s->maxdata / 2;

	devpriv->dev = dev;
	timer_setup(&devpriv->ai_timer, waveform_ai_timer, 0);
	timer_setup(&devpriv->ao_timer, waveform_ao_timer, 0);

	dev_info(dev->class_dev,
		 "%s: %u microvolt, %u microsecond waveform attached\n",
		 dev->board_name, devpriv->wf_amplitude, devpriv->wf_period);

	return 0;
}

static int waveform_attach(struct comedi_device *dev,
			   struct comedi_devconfig *it)
{
	int amplitude = it->options[0];
	int period = it->options[1];

	/* set default amplitude and period */
	if (amplitude <= 0)
		amplitude = 1000000;	/* 1 volt */
	if (period <= 0)
		period = 100000;	/* 0.1 sec */

	return waveform_common_attach(dev, amplitude, period);
}

static int waveform_auto_attach(struct comedi_device *dev,
				unsigned long context_unused)
{
	int amplitude = set_amplitude;
	int period = set_period;

	/* set default amplitude and period */
	if (!amplitude)
		amplitude = 1000000;	/* 1 volt */
	if (!period)
		period = 100000;	/* 0.1 sec */

	return waveform_common_attach(dev, amplitude, period);
}

static void waveform_detach(struct comedi_device *dev)
{
	struct waveform_private *devpriv = dev->private;

	if (devpriv && dev->n_subdevices) {
		timer_delete_sync(&devpriv->ai_timer);
		timer_delete_sync(&devpriv->ao_timer);
	}
}

static struct comedi_driver waveform_driver = {
	.driver_name	= "comedi_test",
	.module		= THIS_MODULE,
	.attach		= waveform_attach,
	.auto_attach	= waveform_auto_attach,
	.detach		= waveform_detach,
};

/*
 * For auto-configuration, a device is created to stand in for a
 * real hardware device.
 */
static int __init comedi_test_init(void)
{
	int ret;

	ret = comedi_driver_register(&waveform_driver);
	if (ret) {
		pr_err("comedi_test: unable to register driver\n");
		return ret;
	}

	if (!config_mode) {
		ret = class_register(&ctcls);
		if (ret) {
			pr_warn("comedi_test: unable to create class\n");
			goto clean3;
		}
		ctdev = device_create(&ctcls, NULL, MKDEV(0, 0), NULL,
				      DEV_NAME);
		if (IS_ERR(ctdev)) {
			pr_warn("comedi_test: unable to create device\n");
			goto clean2;
		}
		ret = comedi_auto_config(ctdev, &waveform_driver, 0);
		if (ret) {
			pr_warn("comedi_test: unable to auto-configure device\n");
			goto clean;
		}
	}

	return 0;

clean:
	device_destroy(&ctcls, MKDEV(0, 0));
clean2:
	class_unregister(&ctcls);
clean3:
	return 0;
}
module_init(comedi_test_init);

static void __exit comedi_test_exit(void)
{
	if (ctdev)
		comedi_auto_unconfig(ctdev);

	if (class_is_registered(&ctcls)) {
		device_destroy(&ctcls, MKDEV(0, 0));
		class_unregister(&ctcls);
	}

	comedi_driver_unregister(&waveform_driver);
}
module_exit(comedi_test_exit);

MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
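Note that waveform_ai_insn_read() returns the AO loopback values rather than the generated waveforms, so a synchronous read mainly verifies the loopback path. A minimal comedilib sketch (editor's illustration, again assuming /dev/comedi0 after a default `modprobe comedi_test`):

/* Editor's sketch: a one-shot read through the AI subdevice. */
#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	lsampl_t sample;

	if (!dev)
		return 1;
	if (comedi_data_read(dev, 0 /* AI subdev */, 0 /* chan */,
			     0 /* range */, AREF_GROUND, &sample) < 0)
		return 1;
	/* the default loopback is mid-scale, i.e. a 0 V flatline */
	printf("raw sample: %u\n", sample);
	comedi_close(dev);
	return 0;
}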
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Force feedback support for EMS Trio Linker Plus II
 *
 * Copyright (c) 2010 Ignaz Forster <ignaz.forster@gmx.de>
 */

/*
 */

#include <linux/hid.h>
#include <linux/input.h>
#include <linux/module.h>

#include "hid-ids.h"

struct emsff_device {
	struct hid_report *report;
};

static int emsff_play(struct input_dev *dev, void *data,
		      struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct emsff_device *emsff = data;
	int weak, strong;

	weak = effect->u.rumble.weak_magnitude;
	strong = effect->u.rumble.strong_magnitude;
	dbg_hid("called with 0x%04x 0x%04x\n", strong, weak);

	weak = weak * 0xff / 0xffff;
	strong = strong * 0xff / 0xffff;

	emsff->report->field[0]->value[1] = weak;
	emsff->report->field[0]->value[2] = strong;

	dbg_hid("running with 0x%02x 0x%02x\n", strong, weak);
	hid_hw_request(hid, emsff->report, HID_REQ_SET_REPORT);

	return 0;
}

static int emsff_init(struct hid_device *hid)
{
	struct emsff_device *emsff;
	struct hid_report *report;
	struct hid_input *hidinput;
	struct list_head *report_list =
			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct input_dev *dev;
	int error;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	dev = hidinput->input;

	if (list_empty(report_list)) {
		hid_err(hid, "no output reports found\n");
		return -ENODEV;
	}

	report = list_first_entry(report_list, struct hid_report, list);
	if (report->maxfield < 1) {
		hid_err(hid, "no fields in the report\n");
		return -ENODEV;
	}

	if (report->field[0]->report_count < 7) {
		hid_err(hid, "not enough values in the field\n");
		return -ENODEV;
	}

	emsff = kzalloc(sizeof(struct emsff_device), GFP_KERNEL);
	if (!emsff)
		return -ENOMEM;

	set_bit(FF_RUMBLE, dev->ffbit);

	error = input_ff_create_memless(dev, emsff, emsff_play);
	if (error) {
		kfree(emsff);
		return error;
	}

	emsff->report = report;
	emsff->report->field[0]->value[0] = 0x01;
	emsff->report->field[0]->value[1] = 0x00;
	emsff->report->field[0]->value[2] = 0x00;
	emsff->report->field[0]->value[3] = 0x00;
	emsff->report->field[0]->value[4] = 0x00;
	emsff->report->field[0]->value[5] = 0x00;
	emsff->report->field[0]->value[6] = 0x00;
	hid_hw_request(hid, emsff->report, HID_REQ_SET_REPORT);

	hid_info(hid, "force feedback for EMS based devices by Ignaz Forster <ignaz.forster@gmx.de>\n");

	return 0;
}

static int ems_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err;
	}

	ret = emsff_init(hdev);
	if (ret) {
		dev_err(&hdev->dev, "force feedback init failed\n");
		hid_hw_stop(hdev);
		goto err;
	}

	return 0;
err:
	return ret;
}

static const struct hid_device_id ems_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_EMS,
			 USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
	{ }
};
MODULE_DEVICE_TABLE(hid, ems_devices);
.name = "hkems", .id_table = ems_devices, .probe = ems_probe, }; module_hid_driver(ems_driver); MODULE_DESCRIPTION("Force feedback support for EMS Trio Linker Plus II"); MODULE_LICENSE("GPL");
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2015  Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie,
						 u16 opcode, u16 len,
						 void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
	hdr->index = index;
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
			       unsigned int size)
{
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct mgmt_hdr) + size, GFP_KERNEL);
	if (!skb)
		return skb;

	skb_reserve(skb, sizeof(struct mgmt_hdr));
	bt_cb(skb)->mgmt.hdev = hdev;
	bt_cb(skb)->mgmt.opcode = opcode;

	return skb;
}

int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
			struct sock *skip_sk)
{
	struct hci_dev *hdev;
	struct mgmt_hdr *hdr;
	int len;

	if (!skb)
		return -EINVAL;

	len = skb->len;
	hdev = bt_cb(skb)->mgmt.hdev;

	/* Time stamp */
	__net_timestamp(skb);

	/* Send just the data, without headers, to the monitor */
	if (channel == HCI_CHANNEL_CONTROL)
		hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode,
					    skb->data, skb->len,
					    skb_get_ktime(skb), flag, skip_sk);

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(len);

	hci_send_to_channel(channel, skb, flag, skip_sk);

	kfree_skb(skb);
	return 0;
}

int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
		    void *data, u16 data_len, int flag, struct sock *skip_sk)
{
	struct sk_buff *skb;

	skb = mgmt_alloc_skb(hdev, event, data_len);
	if (!skb)
		return -ENOMEM;

	if (data)
		skb_put_data(skb, data, data_len);

	return mgmt_send_event_skb(channel, skb, flag, skip_sk);
}

int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb, *mskb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
					 MGMT_EV_CMD_STATUS, sizeof(*ev), ev);
	if (mskb)
		skb->tstamp = mskb->tstamp;
	else
		__net_timestamp(skb);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	if (mskb) {
		hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(mskb);
	}

	return err;
}
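For reference, the wire format assembled by create_monitor_ctrl_event() above, written out as an illustrative packed struct (editor's sketch, not a type defined by this file). All fields are little-endian; the hci_mon_hdr is prepended last via skb_push():

/* Editor's illustration of the monitor control event layout. */
struct monitor_ctrl_event_layout {
	/* struct hci_mon_hdr, HCI_MON_HDR_SIZE == 6 bytes */
	__le16 hdr_opcode;	/* HCI_MON_CTRL_EVENT */
	__le16 hdr_index;	/* controller index */
	__le16 hdr_len;		/* bytes after this header: 6 + len */
	/* body */
	__le32 cookie;		/* socket cookie */
	__le16 opcode;		/* mgmt opcode or event code */
	__u8 buf[];		/* optional payload, len bytes */
} __packed;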
int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
		      void *rp, size_t rp_len)
{
	struct sk_buff *skb, *mskb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
					 MGMT_EV_CMD_COMPLETE,
					 sizeof(*ev) + rp_len, ev);
	if (mskb)
		skb->tstamp = mskb->tstamp;
	else
		__net_timestamp(skb);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	if (mskb) {
		hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(mskb);
	}

	return err;
}

struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode,
					   struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd, *tmp;

	mutex_lock(&hdev->mgmt_pending_lock);

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (hci_sock_get_channel(cmd->sk) != channel)
			continue;

		if (cmd->opcode == opcode) {
			mutex_unlock(&hdev->mgmt_pending_lock);
			return cmd;
		}
	}

	mutex_unlock(&hdev->mgmt_pending_lock);

	return NULL;
}

void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, bool remove,
			  void (*cb)(struct mgmt_pending_cmd *cmd, void *data),
			  void *data)
{
	struct mgmt_pending_cmd *cmd, *tmp;

	mutex_lock(&hdev->mgmt_pending_lock);

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		if (remove)
			list_del(&cmd->list);

		cb(cmd, data);

		if (remove)
			mgmt_pending_free(cmd);
	}

	mutex_unlock(&hdev->mgmt_pending_lock);
}

struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
					  struct hci_dev *hdev,
					  void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->hdev = hdev;

	cmd->param = kmemdup(data, len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	cmd->param_len = len;

	cmd->sk = sk;
	sock_hold(sk);

	return cmd;
}

struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					  struct hci_dev *hdev,
					  void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_new(sk, opcode, hdev, data, len);
	if (!cmd)
		return NULL;

	mutex_lock(&hdev->mgmt_pending_lock);
	list_add_tail(&cmd->list, &hdev->mgmt_pending);
	mutex_unlock(&hdev->mgmt_pending_lock);

	return cmd;
}

void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
{
	mutex_lock(&cmd->hdev->mgmt_pending_lock);
	list_del(&cmd->list);
	mutex_unlock(&cmd->hdev->mgmt_pending_lock);

	mgmt_pending_free(cmd);
}

bool __mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
{
	struct mgmt_pending_cmd *tmp;

	lockdep_assert_held(&hdev->mgmt_pending_lock);

	if (!cmd)
		return false;

	list_for_each_entry(tmp, &hdev->mgmt_pending, list) {
		if (cmd == tmp)
			return true;
	}

	return false;
}

bool mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
{
	bool listed;

	mutex_lock(&hdev->mgmt_pending_lock);
	listed = __mgmt_pending_listed(hdev, cmd);
	mutex_unlock(&hdev->mgmt_pending_lock);

	return listed;
}

bool mgmt_pending_valid(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
{
	bool listed;

	if (!cmd)
		return false;

	mutex_lock(&hdev->mgmt_pending_lock);

	listed = __mgmt_pending_listed(hdev, cmd);
	if (listed)
		list_del(&cmd->list);

	mutex_unlock(&hdev->mgmt_pending_lock);

	return listed;
}
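A sketch of the typical pending-command lifecycle as a caller such as mgmt.c might use these helpers (editor's illustration; the opcode and the empty response are placeholders, and locking around the asynchronous work is elided):

/* Editor's sketch: add a pending command, complete it, then drop it. */
static int example_handler(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_INFO, hdev, data, len);
	if (!cmd)
		return -ENOMEM;

	/* ... kick off asynchronous HCI work here ... */

	/* later, from the completion path: */
	mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0 /* success */,
			  NULL, 0);
	mgmt_pending_remove(cmd);
	return 0;
}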
void mgmt_mesh_foreach(struct hci_dev *hdev,
		       void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
		       void *data, struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx, *tmp;

	list_for_each_entry_safe(mesh_tx, tmp, &hdev->mesh_pending, list) {
		if (!sk || mesh_tx->sk == sk)
			cb(mesh_tx, data);
	}
}

struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;

	if (list_empty(&hdev->mesh_pending))
		return NULL;

	list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) {
		if (!sk || mesh_tx->sk == sk)
			return mesh_tx;
	}

	return NULL;
}

struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle)
{
	struct mgmt_mesh_tx *mesh_tx;

	if (list_empty(&hdev->mesh_pending))
		return NULL;

	list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) {
		if (mesh_tx->handle == handle)
			return mesh_tx;
	}

	return NULL;
}

struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;

	mesh_tx = kzalloc(sizeof(*mesh_tx), GFP_KERNEL);
	if (!mesh_tx)
		return NULL;

	hdev->mesh_send_ref++;
	if (!hdev->mesh_send_ref)
		hdev->mesh_send_ref++;
	mesh_tx->handle = hdev->mesh_send_ref;
	mesh_tx->index = hdev->id;
	memcpy(mesh_tx->param, data, len);
	mesh_tx->param_len = len;
	mesh_tx->sk = sk;
	sock_hold(sk);

	list_add_tail(&mesh_tx->list, &hdev->mesh_pending);

	return mesh_tx;
}

void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx)
{
	list_del(&mesh_tx->list);
	sock_put(mesh_tx->sk);
	kfree(mesh_tx);
}
// SPDX-License-Identifier: GPL-2.0
/*
 * device.h - generic, centralized driver model
 *
 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2008-2009 Novell Inc.
 *
 * See Documentation/driver-api/driver-model/ for more information.
 */

#ifndef _DEVICE_H_
#define _DEVICE_H_

#include <linux/dev_printk.h>
#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/devres.h>
#include <linux/device/driver.h>
#include <linux/cleanup.h>
#include <asm/device.h>

struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct module;
struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
struct msi_device_data;

/**
 * struct subsys_interface - interfaces to device functions
 * @name:       name of the device function
 * @subsys:     subsystem of the devices to attach to
 * @node:       the list of functions registered at the subsystem
 * @add_dev:    device hookup to device function handler
 * @remove_dev: device hookup to device function handler
 *
 * Simple interfaces attached to a subsystem. Multiple interfaces can
 * attach to a subsystem and its devices. Unlike drivers, they do not
 * exclusively claim or control devices. Interfaces usually represent
 * a specific functionality of a subsystem/class of devices.
 */
struct subsys_interface {
	const char *name;
	const struct bus_type *subsys;
	struct list_head node;
	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};

int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);

int subsys_system_register(const struct bus_type *subsys,
			   const struct attribute_group **groups);
int subsys_virtual_register(const struct bus_type *subsys,
			    const struct attribute_group **groups);

/*
 * The type of device, "struct device" is embedded in. A class
 * or bus can contain devices of different types
 * like "partitions" and "disks", "mouse" and "event".
 * This identifies the device type and carries type-specific
 * information, equivalent to the kobj_type of a kobject.
 * If "name" is specified, the uevent will contain it in
 * the DEVTYPE variable.
 */
struct device_type {
	const char *name;
	const struct attribute_group **groups;
	int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
	char *(*devnode)(const struct device *dev, umode_t *mode,
			 kuid_t *uid, kgid_t *gid);
	void (*release)(struct device *dev);

	const struct dev_pm_ops *pm;
};

/**
 * struct device_attribute - Interface for exporting device attributes.
 * @attr: sysfs attribute definition.
 * @show: Show handler.
 * @store: Store handler.
 */
struct device_attribute {
	struct attribute attr;
	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
			char *buf);
	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
};

/**
 * struct dev_ext_attribute - Exported device attribute with extra context.
 * @attr: Exported device attribute.
 * @var: Pointer to context.
 */
struct dev_ext_attribute {
	struct device_attribute attr;
	void *var;
};

ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
			  char *buf);
ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
			char *buf);
ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count);
ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
			   char *buf);

/**
 * DEVICE_ATTR - Define a device attribute.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_show: Show handler. Optional, but mandatory if attribute is readable.
 * @_store: Store handler. Optional, but mandatory if attribute is writable.
 *
 * Convenience macro for defining a struct device_attribute.
 *
 * For example, ``DEVICE_ATTR(foo, 0644, foo_show, foo_store);`` expands to:
 *
 * .. code-block:: c
 *
 *	struct device_attribute dev_attr_foo = {
 *		.attr	= { .name = "foo", .mode = 0644 },
 *		.show	= foo_show,
 *		.store	= foo_store,
 *	};
 */
#define DEVICE_ATTR(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
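A sketch of how a driver typically uses these macros (editor's illustration; "foo" and foo_value are placeholders, and sysfs_emit()/kstrtoint() are the usual helpers for formatting and parsing sysfs buffers):

/* Editor's sketch: a read-write device attribute and its registration. */
static int foo_value;

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", foo_value);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int ret = kstrtoint(buf, 0, &foo_value);

	return ret ? ret : count;
}
static DEVICE_ATTR(foo, 0644, foo_show, foo_store);

/* then, typically from probe(): */
/*	ret = device_create_file(dev, &dev_attr_foo); */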
/**
 * DEVICE_ATTR_PREALLOC - Define a preallocated device attribute.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_show: Show handler. Optional, but mandatory if attribute is readable.
 * @_store: Store handler. Optional, but mandatory if attribute is writable.
 *
 * Like DEVICE_ATTR(), but ``SYSFS_PREALLOC`` is set on @_mode.
 */
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = \
		__ATTR_PREALLOC(_name, _mode, _show, _store)

/**
 * DEVICE_ATTR_RW - Define a read-write device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR(), but @_mode is 0644, @_show is <_name>_show,
 * and @_store is <_name>_store.
 */
#define DEVICE_ATTR_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)

/**
 * DEVICE_ATTR_ADMIN_RW - Define an admin-only read-write device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR_RW(), but @_mode is 0600.
 */
#define DEVICE_ATTR_ADMIN_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)

/**
 * DEVICE_ATTR_RO - Define a readable device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR(), but @_mode is 0444 and @_show is <_name>_show.
 */
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

/**
 * DEVICE_ATTR_ADMIN_RO - Define an admin-only readable device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR_RO(), but @_mode is 0400.
 */
#define DEVICE_ATTR_ADMIN_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)

/**
 * DEVICE_ATTR_WO - Define an admin-only writable device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR(), but @_mode is 0200 and @_store is <_name>_store.
 */
#define DEVICE_ATTR_WO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)

/**
 * DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_var: Identifier of unsigned long.
 *
 * Like DEVICE_ATTR(), but @_show and @_store are automatically provided
 * such that reads and writes to the attribute from userspace affect @_var.
 */
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }

/**
 * DEVICE_INT_ATTR - Define a device attribute backed by an int.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_var: Identifier of int.
 *
 * Like DEVICE_ULONG_ATTR(), but @_var is an int.
 */
#define DEVICE_INT_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }

/**
 * DEVICE_BOOL_ATTR - Define a device attribute backed by a bool.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_var: Identifier of bool.
 *
 * Like DEVICE_ULONG_ATTR(), but @_var is a bool.
 */
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }

/**
 * DEVICE_STRING_ATTR_RO - Define a device attribute backed by a r/o string.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_var: Identifier of string.
 *
 * Like DEVICE_ULONG_ATTR(), but @_var is a string. Because the length of the
 * string allocation is unknown, the attribute must be read-only.
 */
#define DEVICE_STRING_ATTR_RO(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, (_mode) & ~0222, device_show_string, NULL), (_var) }

#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = \
		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)

int device_create_file(struct device *device,
		       const struct device_attribute *entry);
void device_remove_file(struct device *dev,
			const struct device_attribute *attr);
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr);
int __must_check device_create_bin_file(struct device *dev,
					const struct bin_attribute *attr);
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr);

/**
 * devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @type: Type to allocate per-cpu memory for
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
#define devm_alloc_percpu(dev, type) \
	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
						      __alignof__(type)))

void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align);
void devm_free_percpu(struct device *dev, void __percpu *pdata);

struct device_dma_parameters {
	/*
	 * a low level driver may set these to teach IOMMU code about
	 * sg limitations.
	 */
	unsigned int max_segment_size;
	unsigned int min_align_mask;
	unsigned long segment_boundary_mask;
};

/**
 * enum device_link_state - Device link states.
 * @DL_STATE_NONE: The presence of the drivers is not being tracked.
 * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
 * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
 * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
 * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
 * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
 */
enum device_link_state {
	DL_STATE_NONE = -1,
	DL_STATE_DORMANT = 0,
	DL_STATE_AVAILABLE,
	DL_STATE_CONSUMER_PROBE,
	DL_STATE_ACTIVE,
	DL_STATE_SUPPLIER_UNBIND,
};

/*
 * Device link flags.
 *
 * STATELESS: The core will not remove this link automatically.
 * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
 * PM_RUNTIME: If set, the runtime PM framework will use this link.
 * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
 * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
 * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
 * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
 * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
 * INFERRED: Inferred from data (eg: firmware) and not from driver actions.
 */
#define DL_FLAG_STATELESS		BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER	BIT(1)
#define DL_FLAG_PM_RUNTIME		BIT(2)
#define DL_FLAG_RPM_ACTIVE		BIT(3)
#define DL_FLAG_AUTOREMOVE_SUPPLIER	BIT(4)
#define DL_FLAG_AUTOPROBE_CONSUMER	BIT(5)
#define DL_FLAG_MANAGED			BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY		BIT(7)
#define DL_FLAG_INFERRED		BIT(8)
#define DL_FLAG_CYCLE			BIT(9)
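A sketch of creating a managed link with these flags (editor's illustration; device_link_add() is declared further down in this header, and the consumer/supplier devices are placeholders):

/* Editor's sketch: make the consumer's runtime PM follow its supplier. */
static int example_bind(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER |
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	return link ? 0 : -EINVAL;
}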
* @DL_DEV_UNBINDING: The driver is unbinding from the device. */ enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING, DL_DEV_DRIVER_BOUND, DL_DEV_UNBINDING, }; /** * enum device_removable - Whether the device is removable. The criteria for a * device to be classified as removable is determined by its subsystem or bus. * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this * device (default). * @DEVICE_REMOVABLE_UNKNOWN: Device location is Unknown. * @DEVICE_FIXED: Device is not removable by the user. * @DEVICE_REMOVABLE: Device is removable by the user. */ enum device_removable { DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */ DEVICE_REMOVABLE_UNKNOWN, DEVICE_FIXED, DEVICE_REMOVABLE, }; /** * struct dev_links_info - Device data related to device links. * @suppliers: List of links to supplier devices. * @consumers: List of links to consumer devices. * @defer_sync: Hook to global list of devices that have deferred sync_state. * @status: Driver status information. */ struct dev_links_info { struct list_head suppliers; struct list_head consumers; struct list_head defer_sync; enum dl_dev_state status; }; /** * struct dev_msi_info - Device data related to MSI * @domain: The MSI interrupt domain associated to the device * @data: Pointer to MSI device data */ struct dev_msi_info { #ifdef CONFIG_GENERIC_MSI_IRQ struct irq_domain *domain; struct msi_device_data *data; #endif }; /** * enum device_physical_location_panel - Describes which panel surface of the * system's housing the device connection point resides on. * @DEVICE_PANEL_TOP: Device connection point is on the top panel. * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel. * @DEVICE_PANEL_LEFT: Device connection point is on the left panel. * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel. * @DEVICE_PANEL_FRONT: Device connection point is on the front panel. * @DEVICE_PANEL_BACK: Device connection point is on the back panel. * @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown. */ enum device_physical_location_panel { DEVICE_PANEL_TOP, DEVICE_PANEL_BOTTOM, DEVICE_PANEL_LEFT, DEVICE_PANEL_RIGHT, DEVICE_PANEL_FRONT, DEVICE_PANEL_BACK, DEVICE_PANEL_UNKNOWN, }; /** * enum device_physical_location_vertical_position - Describes vertical * position of the device connection point on the panel surface. * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel. * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel. * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel. */ enum device_physical_location_vertical_position { DEVICE_VERT_POS_UPPER, DEVICE_VERT_POS_CENTER, DEVICE_VERT_POS_LOWER, }; /** * enum device_physical_location_horizontal_position - Describes horizontal * position of the device connection point on the panel surface. * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel. * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel. * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel. */ enum device_physical_location_horizontal_position { DEVICE_HORI_POS_LEFT, DEVICE_HORI_POS_CENTER, DEVICE_HORI_POS_RIGHT, }; /** * struct device_physical_location - Device data related to physical location * of the device connection point. * @panel: Panel surface of the system's housing that the device connection * point resides on. * @vertical_position: Vertical position of the device connection point within * the panel. 
* @horizontal_position: Horizontal position of the device connection point * within the panel. * @dock: Set if the device connection point resides in a docking station or * port replicator. * @lid: Set if this device connection point resides on the lid of a laptop * system. */ struct device_physical_location { enum device_physical_location_panel panel; enum device_physical_location_vertical_position vertical_position; enum device_physical_location_horizontal_position horizontal_position; bool dock; bool lid; }; /** * struct device - The basic device structure * @parent: The device's "parent" device, the device to which it is attached. * In most cases, a parent device is some sort of bus or host * controller. If parent is NULL, the device is a top-level device, * which is not usually what you want. * @p: Holds the private data of the driver core portions of the device. * See the comment of the struct device_private for detail. * @kobj: A top-level, abstract class from which other classes are derived. * @init_name: Initial name of the device. * @type: The type of device. * This identifies the device type and carries type-specific * information. * @mutex: Mutex to synchronize calls to its driver. * @bus: Type of bus device is on. * @driver: Which driver has allocated this * @platform_data: Platform data specific to the device. * Example: For devices on custom boards, as typical of embedded * and SOC based hardware, Linux often uses platform_data to point * to board-specific structures describing devices and how they * are wired. That can include what ports are available, chip * variants, which GPIO pins act in what additional roles, and so * on. This shrinks the "Board Support Packages" (BSPs) and * minimizes board-specific #ifdefs in drivers. * @driver_data: Private pointer for driver specific info. * @links: Links to suppliers and consumers of this device. * @power: For device power management. * See Documentation/driver-api/pm/devices.rst for details. * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. * @em_pd: device's energy model performance domain * @pins: For device pin management. * See Documentation/driver-api/pin-control.rst for details. * @msi: MSI related data * @numa_node: NUMA node this device is close to. * @dma_ops: DMA mapping operations for this device. * @dma_mask: Dma mask (if dma'ble device). * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all * hardware supports 64-bit addresses for consistent allocations * such as descriptors. * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller * DMA limit than the device itself supports. * @dma_range_map: map for DMA memory ranges relative to that of RAM * @dma_parms: A low level driver may set these to teach IOMMU code about * segment limitations. * @dma_pools: Dma pools (if dma'ble device). * @dma_mem: Internal for coherent mem override. * @cma_area: Contiguous memory area for dma allocations * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use. * @dma_io_tlb_pools: List of transient swiotlb memory pools. * @dma_io_tlb_lock: Protects changes to the list of active pools. * @dma_uses_io_tlb: %true if device has used the software IO TLB. * @archdata: For arch-specific additions. * @of_node: Associated device tree node. * @fwnode: Associated device node supplied by platform firmware. * @devt: For creating the sysfs "dev". 
* @id: device instance * @devres_lock: Spinlock to protect the resource of the device. * @devres_head: The resources list of the device. * @class: The class of the device. * @groups: Optional attribute groups. * @release: Callback to free the device after all references have * gone away. This should be set by the allocator of the * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. * @iommu: Per device generic IOMMU runtime data * @physical_location: Describes physical location of the device connection * point in the system housing. * @removable: Whether the device can be removed from the system. This * should be set by the subsystem / bus driver that discovered * the device. * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). * @of_node_reused: Set if the device-tree node is shared with an ancestor * device. * @state_synced: The hardware state of this device has been synced to match * the software state of this device by calling the driver/bus * sync_state() callback. * @can_match: The device has matched with a driver at least once or it is in * a bus (like AMBA) which can't check for matching drivers until * other devices probe successfully. * @dma_coherent: this particular device is dma coherent, even if the * architecture supports non-coherent devices. * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the * streaming DMA operations (->map_* / ->unmap_* / ->sync_*), * and optionally (if the coherent mask is large enough) also * for dma allocations. This flag is managed by the dma ops * instance from ->dma_supported. * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers. * @dma_iommu: Device is using default IOMMU implementation for DMA and * doesn't rely on dma_ops structure. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. The device structure contains the information * that the device model core needs to model the system. Most subsystems, * however, track additional information about the devices they host. As a * result, it is rare for devices to be represented by bare device structures; * instead, that structure, like kobject structures, is usually embedded within * a higher-level representation of the device. */ struct device { struct kobject kobj; struct device *parent; struct device_private *p; const char *init_name; /* initial name of the device */ const struct device_type *type; const struct bus_type *bus; /* type of bus device is on */ struct device_driver *driver; /* which driver has allocated this device */ void *platform_data; /* Platform specific data, device core doesn't touch it */ void *driver_data; /* Driver data, set and get with dev_set_drvdata/dev_get_drvdata */ struct mutex mutex; /* mutex to synchronize calls to * its driver. */ struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; #ifdef CONFIG_ENERGY_MODEL struct em_perf_domain *em_pd; #endif #ifdef CONFIG_PINCTRL struct dev_pin_info *pins; #endif struct dev_msi_info msi; #ifdef CONFIG_ARCH_HAS_DMA_OPS const struct dma_map_ops *dma_ops; #endif u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as not all hardware supports 64 bit addresses for consistent allocations such as descriptors. 
*/ u64 bus_dma_limit; /* upstream dma constraint */ const struct bus_dma_region *dma_range_map; struct device_dma_parameters *dma_parms; struct list_head dma_pools; /* dma pools (if dma'ble) */ #ifdef CONFIG_DMA_DECLARE_COHERENT struct dma_coherent_mem *dma_mem; /* internal for coherent mem override */ #endif #ifdef CONFIG_DMA_CMA struct cma *cma_area; /* contiguous memory area for dma allocations */ #endif #ifdef CONFIG_SWIOTLB struct io_tlb_mem *dma_io_tlb_mem; #endif #ifdef CONFIG_SWIOTLB_DYNAMIC struct list_head dma_io_tlb_pools; spinlock_t dma_io_tlb_lock; bool dma_uses_io_tlb; #endif /* arch specific additions */ struct dev_archdata archdata; struct device_node *of_node; /* associated device tree node */ struct fwnode_handle *fwnode; /* firmware device node */ #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ #endif dev_t devt; /* dev_t, creates the sysfs "dev" */ u32 id; /* device instance */ spinlock_t devres_lock; struct list_head devres_head; const struct class *class; const struct attribute_group **groups; /* optional groups */ void (*release)(struct device *dev); struct iommu_group *iommu_group; struct dev_iommu *iommu; struct device_physical_location *physical_location; enum device_removable removable; bool offline_disabled:1; bool offline:1; bool of_node_reused:1; bool state_synced:1; bool can_match:1; #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) bool dma_coherent:1; #endif #ifdef CONFIG_DMA_OPS_BYPASS bool dma_ops_bypass : 1; #endif #ifdef CONFIG_DMA_NEED_SYNC bool dma_skip_sync:1; #endif #ifdef CONFIG_IOMMU_DMA bool dma_iommu:1; #endif }; /** * struct device_link - Device link representation. * @supplier: The device on the supplier end of the link. * @s_node: Hook to the supplier device's list of links to consumers. * @consumer: The device on the consumer end of the link. * @c_node: Hook to the consumer device's list of links to suppliers. * @link_dev: device used to expose link details in sysfs * @status: The state of the link (with respect to the presence of drivers). * @flags: Link flags. * @rpm_active: Whether or not the consumer device is runtime-PM-active. * @kref: Count repeated addition of the same link. * @rm_work: Work structure used for removing the link. * @supplier_preactivated: Supplier has been made active before consumer probe. */ struct device_link { struct device *supplier; struct list_head s_node; struct device *consumer; struct list_head c_node; struct device link_dev; enum device_link_state status; u32 flags; refcount_t rpm_active; struct kref kref; struct work_struct rm_work; bool supplier_preactivated; /* Owned by consumer probe. */ }; #define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj) /** * device_iommu_mapped - Returns true when the device DMA is translated * by an IOMMU * @dev: Device to perform the check on */ static inline bool device_iommu_mapped(struct device *dev) { return (dev->iommu_group != NULL); } /* Get the wakeup routines, which depend on struct device */ #include <linux/pm_wakeup.h> /** * dev_name - Return a device's name. * @dev: Device with name to get. * Return: The kobject name of the device, or its initial name if unavailable. 
*/ static inline const char *dev_name(const struct device *dev) { /* Use the init name until the kobject becomes available */ if (dev->init_name) return dev->init_name; return kobject_name(&dev->kobj); } /** * dev_bus_name - Return a device's bus/class name, if at all possible * @dev: struct device to get the bus/class name of * * Will return the name of the bus/class the device is attached to. If it is * not attached to a bus/class, an empty string will be returned. */ static inline const char *dev_bus_name(const struct device *dev) { return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : ""); } __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...); #ifdef CONFIG_NUMA static inline int dev_to_node(struct device *dev) { return dev->numa_node; } static inline void set_dev_node(struct device *dev, int node) { dev->numa_node = node; } #else static inline int dev_to_node(struct device *dev) { return NUMA_NO_NODE; } static inline void set_dev_node(struct device *dev, int node) { } #endif static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) { #ifdef CONFIG_GENERIC_MSI_IRQ return dev->msi.domain; #else return NULL; #endif } static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) { #ifdef CONFIG_GENERIC_MSI_IRQ dev->msi.domain = d; #endif } static inline void *dev_get_drvdata(const struct device *dev) { return dev->driver_data; } static inline void dev_set_drvdata(struct device *dev, void *data) { dev->driver_data = data; } static inline struct pm_subsys_data *dev_to_psd(struct device *dev) { return dev ? dev->power.subsys_data : NULL; } static inline unsigned int dev_get_uevent_suppress(const struct device *dev) { return dev->kobj.uevent_suppress; } static inline void dev_set_uevent_suppress(struct device *dev, int val) { dev->kobj.uevent_suppress = val; } static inline int device_is_registered(struct device *dev) { return dev->kobj.state_in_sysfs; } static inline void device_enable_async_suspend(struct device *dev) { if (!dev->power.is_prepared) dev->power.async_suspend = true; } static inline void device_disable_async_suspend(struct device *dev) { if (!dev->power.is_prepared) dev->power.async_suspend = false; } static inline bool device_async_suspend_enabled(struct device *dev) { return !!dev->power.async_suspend; } static inline bool device_pm_not_required(struct device *dev) { return dev->power.no_pm; } static inline void device_set_pm_not_required(struct device *dev) { dev->power.no_pm = true; #ifdef CONFIG_PM dev->power.no_callbacks = true; #endif } static inline void dev_pm_syscore_device(struct device *dev, bool val) { #ifdef CONFIG_PM_SLEEP dev->power.syscore = val; #endif } static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags) { dev->power.driver_flags = flags; } static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags) { return !!(dev->power.driver_flags & flags); } static inline bool dev_pm_smart_suspend(struct device *dev) { #ifdef CONFIG_PM_SLEEP return dev->power.smart_suspend; #else return false; #endif } /* * dev_pm_set_strict_midlayer - Update the device's power.strict_midlayer flag * @dev: Target device. * @val: New flag value. 
* * When set, power.strict_midlayer means that the middle layer power management * code (typically, a bus type or a PM domain) does not expect its runtime PM * suspend callback to be invoked at all during system-wide PM transitions and * it does not expect its runtime PM resume callback to be invoked at any point * when runtime PM is disabled for the device during system-wide PM transitions. */ static inline void dev_pm_set_strict_midlayer(struct device *dev, bool val) { #ifdef CONFIG_PM_SLEEP dev->power.strict_midlayer = val; #endif } static inline bool dev_pm_strict_midlayer_is_set(struct device *dev) { #ifdef CONFIG_PM_SLEEP return dev->power.strict_midlayer; #else return false; #endif } static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); } static inline int device_lock_interruptible(struct device *dev) { return mutex_lock_interruptible(&dev->mutex); } static inline int device_trylock(struct device *dev) { return mutex_trylock(&dev->mutex); } static inline void device_unlock(struct device *dev) { mutex_unlock(&dev->mutex); } DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T)) static inline void device_lock_assert(struct device *dev) { lockdep_assert_held(&dev->mutex); } static inline bool dev_has_sync_state(struct device *dev) { if (!dev) return false; if (dev->driver && dev->driver->sync_state) return true; if (dev->bus && dev->bus->sync_state) return true; return false; } static inline int dev_set_drv_sync_state(struct device *dev, void (*fn)(struct device *dev)) { if (!dev || !dev->driver) return 0; if (dev->driver->sync_state && dev->driver->sync_state != fn) return -EBUSY; if (!dev->driver->sync_state) dev->driver->sync_state = fn; return 0; } static inline void dev_set_removable(struct device *dev, enum device_removable removable) { dev->removable = removable; } static inline bool dev_is_removable(struct device *dev) { return dev->removable == DEVICE_REMOVABLE; } static inline bool dev_removable_is_valid(struct device *dev) { return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED; } /* * High level routines for use by the bus drivers */ int __must_check device_register(struct device *dev); void device_unregister(struct device *dev); void device_initialize(struct device *dev); int __must_check device_add(struct device *dev); void device_del(struct device *dev); DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T)) int device_for_each_child(struct device *parent, void *data, device_iter_t fn); int device_for_each_child_reverse(struct device *parent, void *data, device_iter_t fn); int device_for_each_child_reverse_from(struct device *parent, struct device *from, void *data, device_iter_t fn); struct device *device_find_child(struct device *parent, const void *data, device_match_t match); /** * device_find_child_by_name - device iterator for locating a child device. * @parent: parent struct device * @name: name of the child device * * This is similar to the device_find_child() function above, but it * returns a reference to a device that has the name @name. * * NOTE: you will need to drop the reference with put_device() after use. */ static inline struct device *device_find_child_by_name(struct device *parent, const char *name) { return device_find_child(parent, name, device_match_name); } /** * device_find_any_child - device iterator for locating a child device, if any. * @parent: parent struct device * * This is similar to the device_find_child() function above, but it * returns a reference to a child device, if any. 
* * NOTE: you will need to drop the reference with put_device() after use. */ static inline struct device *device_find_any_child(struct device *parent) { return device_find_child(parent, NULL, device_match_any); } int device_rename(struct device *dev, const char *new_name); int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); static inline bool device_supports_offline(struct device *dev) { return dev->bus && dev->bus->offline && dev->bus->online; } #define __device_lock_set_class(dev, name, key) \ do { \ struct device *__d2 __maybe_unused = dev; \ lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \ } while (0) /** * device_lock_set_class - Specify a temporary lock class while a device * is attached to a driver * @dev: device to modify * @key: lock class key data * * This must be called with the device_lock() already held, for example * from driver ->probe(). Take care to only override the default * lockdep_no_validate class. */ #ifdef CONFIG_LOCKDEP #define device_lock_set_class(dev, key) \ do { \ struct device *__d = dev; \ dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \ &__lockdep_no_validate__), \ "overriding existing custom lock class\n"); \ __device_lock_set_class(__d, #key, key); \ } while (0) #else #define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key) #endif /** * device_lock_reset_class - Return a device to the default lockdep novalidate state * @dev: device to modify * * This must be called with the device_lock() already held, for example * from driver ->remove(). */ #define device_lock_reset_class(dev) \ do { \ struct device *__d __maybe_unused = dev; \ lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \ _THIS_IP_); \ } while (0) void lock_device_hotplug(void); void unlock_device_hotplug(void); int lock_device_hotplug_sysfs(void); int device_offline(struct device *dev); int device_online(struct device *dev); void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void device_set_node(struct device *dev, struct fwnode_handle *fwnode); int device_add_of_node(struct device *dev, struct device_node *of_node); void device_remove_of_node(struct device *dev); void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode); static inline struct device_node *dev_of_node(struct device *dev) { if (!IS_ENABLED(CONFIG_OF) || !dev) return NULL; return dev->of_node; } static inline int dev_num_vf(struct device *dev) { if (dev->bus && dev->bus->num_vf) return dev->bus->num_vf(dev); return 0; } /* * Root device objects for grouping under /sys/devices */ struct device *__root_device_register(const char *name, struct module *owner); /* This is a macro to avoid include problems with THIS_MODULE */ #define root_device_register(name) \ __root_device_register(name, THIS_MODULE) void root_device_unregister(struct device *root); static inline void *dev_get_platdata(const struct device *dev) { return dev->platform_data; } /* * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. 
*/ int __must_check device_driver_attach(const struct device_driver *drv, struct device *dev); int __must_check device_bind_driver(struct device *dev); void device_release_driver(struct device *dev); int __must_check device_attach(struct device *dev); int __must_check driver_attach(const struct device_driver *drv); void device_initial_probe(struct device *dev); int __must_check device_reprobe(struct device *dev); bool device_is_bound(struct device *dev); /* * Easy functions for dynamically creating devices on the fly */ __printf(5, 6) struct device * device_create(const struct class *cls, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...); __printf(6, 7) struct device * device_create_with_groups(const struct class *cls, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, ...); void device_destroy(const struct class *cls, dev_t devt); int __must_check device_add_groups(struct device *dev, const struct attribute_group **groups); void device_remove_groups(struct device *dev, const struct attribute_group **groups); static inline int __must_check device_add_group(struct device *dev, const struct attribute_group *grp) { const struct attribute_group *groups[] = { grp, NULL }; return device_add_groups(dev, groups); } static inline void device_remove_group(struct device *dev, const struct attribute_group *grp) { const struct attribute_group *groups[] = { grp, NULL }; device_remove_groups(dev, groups); } int __must_check devm_device_add_group(struct device *dev, const struct attribute_group *grp); /* * get_device - atomically increment the reference count for the device. * */ struct device *get_device(struct device *dev); void put_device(struct device *dev); DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T)) bool kill_device(struct device *dev); #ifdef CONFIG_DEVTMPFS int devtmpfs_mount(void); #else static inline int devtmpfs_mount(void) { return 0; } #endif /* drivers/base/power/shutdown.c */ void device_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ const char *dev_driver_string(const struct device *dev); /* Device links interface. */ struct device_link *device_link_add(struct device *consumer, struct device *supplier, u32 flags); void device_link_del(struct device_link *link); void device_link_remove(void *consumer, struct device *supplier); void device_links_supplier_sync_state_pause(void); void device_links_supplier_sync_state_resume(void); void device_link_wait_removal(void); static inline bool device_link_test(const struct device_link *link, u32 flags) { return !!(link->flags & flags); } /* Create alias, so I can be autoloaded. */ #define MODULE_ALIAS_CHARDEV(major,minor) \ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ MODULE_ALIAS("char-major-" __stringify(major) "-*") #endif /* _DEVICE_H_ */
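/*
 * Editor's example -- not part of the header above. A minimal sketch of how
 * the DEVICE_ATTR_RW() helper is typically used by a driver: the attribute
 * name "delay", the example_delay variable, and example_probe_attrs() are
 * all hypothetical. DEVICE_ATTR_RW(delay) expects delay_show()/delay_store()
 * to exist and expands to a struct device_attribute named dev_attr_delay
 * with mode 0644.
 */
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

static unsigned long example_delay;	/* hypothetical driver state */

static ssize_t delay_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	/* sysfs_emit() bounds the output to a single PAGE_SIZE buffer */
	return sysfs_emit(buf, "%lu\n", example_delay);
}

static ssize_t delay_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	int ret = kstrtoul(buf, 0, &example_delay);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(delay);

/* Typically called from probe(); the matching device_remove_file() would run
 * in remove(). */
static int example_probe_attrs(struct device *dev)
{
	return device_create_file(dev, &dev_attr_delay);
}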
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org> * Copyright 2001-2006 Ian Kent <raven@themaw.net> */ #include <linux/capability.h> #include <linux/compat.h> #include "autofs_i.h" static int autofs_dir_permission(struct mnt_idmap *, struct inode *, int); static int autofs_dir_symlink(struct mnt_idmap *, struct inode *, struct dentry *, const char *); static int autofs_dir_unlink(struct inode *, struct dentry *); static int autofs_dir_rmdir(struct inode *, struct dentry *); static struct dentry *autofs_dir_mkdir(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); static long autofs_root_ioctl(struct file *, unsigned int, unsigned long); #ifdef CONFIG_COMPAT static long autofs_root_compat_ioctl(struct file *, unsigned int, unsigned long); #endif static int autofs_dir_open(struct inode *inode, struct file *file); static struct dentry *autofs_lookup(struct inode *, struct dentry *, unsigned int); static struct vfsmount *autofs_d_automount(struct path *); static int autofs_d_manage(const struct path *, bool); static void autofs_dentry_release(struct dentry *); const struct file_operations autofs_root_operations = { .open = dcache_dir_open, .release = dcache_dir_close, .read = generic_read_dir, .iterate_shared = dcache_readdir, .llseek = dcache_dir_lseek, .unlocked_ioctl = autofs_root_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = autofs_root_compat_ioctl, #endif }; const struct file_operations autofs_dir_operations = { .open = autofs_dir_open, .release = dcache_dir_close, .read = generic_read_dir, .iterate_shared = dcache_readdir, .llseek = dcache_dir_lseek, }; const struct inode_operations autofs_dir_inode_operations = { .lookup = autofs_lookup, .permission = autofs_dir_permission, .unlink = autofs_dir_unlink, .symlink = autofs_dir_symlink, .mkdir = autofs_dir_mkdir, .rmdir = autofs_dir_rmdir, }; const struct dentry_operations autofs_dentry_operations = { .d_automount = autofs_d_automount, .d_manage = autofs_d_manage, .d_release = autofs_dentry_release, }; static void autofs_del_active(struct dentry *dentry) { struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino; ino = autofs_dentry_ino(dentry); spin_lock(&sbi->lookup_lock); list_del_init(&ino->active); spin_unlock(&sbi->lookup_lock); } static int autofs_dir_open(struct inode *inode, struct file *file) { struct dentry *dentry = file->f_path.dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); pr_debug("file=%p dentry=%p %pd\n", file, dentry, dentry); if (autofs_oz_mode(sbi)) goto out; /* * An empty directory in an autofs file system is always a * mount point. The daemon must have failed to mount this * during lookup so it doesn't exist. This can happen, for * example, if user space returns an incorrect status for a * mount request. Otherwise we're doing a readdir on the * autofs file system so just let the libfs routines handle * it. 
*/ spin_lock(&sbi->lookup_lock); if (!path_is_mountpoint(&file->f_path) && autofs_empty(ino)) { spin_unlock(&sbi->lookup_lock); return -ENOENT; } spin_unlock(&sbi->lookup_lock); out: return dcache_dir_open(inode, file); } static void autofs_dentry_release(struct dentry *de) { struct autofs_info *ino = autofs_dentry_ino(de); struct autofs_sb_info *sbi = autofs_sbi(de->d_sb); pr_debug("releasing %p\n", de); if (!ino) return; if (sbi) { spin_lock(&sbi->lookup_lock); if (!list_empty(&ino->active)) list_del(&ino->active); if (!list_empty(&ino->expiring)) list_del(&ino->expiring); spin_unlock(&sbi->lookup_lock); } autofs_free_ino(ino); } static struct dentry *autofs_lookup_active(struct dentry *dentry) { struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct dentry *parent = dentry->d_parent; const struct qstr *name = &dentry->d_name; unsigned int len = name->len; unsigned int hash = name->hash; const unsigned char *str = name->name; struct list_head *p, *head; head = &sbi->active_list; if (list_empty(head)) return NULL; spin_lock(&sbi->lookup_lock); list_for_each(p, head) { struct autofs_info *ino; struct dentry *active; const struct qstr *qstr; ino = list_entry(p, struct autofs_info, active); active = ino->dentry; spin_lock(&active->d_lock); /* Already gone? */ if ((int) d_count(active) <= 0) goto next; qstr = &active->d_name; if (active->d_name.hash != hash) goto next; if (active->d_parent != parent) goto next; if (qstr->len != len) goto next; if (memcmp(qstr->name, str, len)) goto next; if (d_unhashed(active)) { dget_dlock(active); spin_unlock(&active->d_lock); spin_unlock(&sbi->lookup_lock); return active; } next: spin_unlock(&active->d_lock); } spin_unlock(&sbi->lookup_lock); return NULL; } static struct dentry *autofs_lookup_expiring(struct dentry *dentry, bool rcu_walk) { struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct dentry *parent = dentry->d_parent; const struct qstr *name = &dentry->d_name; unsigned int len = name->len; unsigned int hash = name->hash; const unsigned char *str = name->name; struct list_head *p, *head; head = &sbi->expiring_list; if (list_empty(head)) return NULL; spin_lock(&sbi->lookup_lock); list_for_each(p, head) { struct autofs_info *ino; struct dentry *expiring; const struct qstr *qstr; if (rcu_walk) { spin_unlock(&sbi->lookup_lock); return ERR_PTR(-ECHILD); } ino = list_entry(p, struct autofs_info, expiring); expiring = ino->dentry; spin_lock(&expiring->d_lock); /* We've already been dentry_iput or unlinked */ if (d_really_is_negative(expiring)) goto next; qstr = &expiring->d_name; if (expiring->d_name.hash != hash) goto next; if (expiring->d_parent != parent) goto next; if (qstr->len != len) goto next; if (memcmp(qstr->name, str, len)) goto next; if (d_unhashed(expiring)) { dget_dlock(expiring); spin_unlock(&expiring->d_lock); spin_unlock(&sbi->lookup_lock); return expiring; } next: spin_unlock(&expiring->d_lock); } spin_unlock(&sbi->lookup_lock); return NULL; } static int autofs_mount_wait(const struct path *path, bool rcu_walk) { struct autofs_sb_info *sbi = autofs_sbi(path->dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(path->dentry); int status = 0; if (ino->flags & AUTOFS_INF_PENDING) { if (rcu_walk) return -ECHILD; pr_debug("waiting for mount name=%pd\n", path->dentry); status = autofs_wait(sbi, path, NFY_MOUNT); pr_debug("mount wait done status=%d\n", status); ino->last_used = jiffies; return status; } if (!(sbi->flags & AUTOFS_SBI_STRICTEXPIRE)) ino->last_used = jiffies; return status; } static int do_expire_wait(const 
struct path *path, bool rcu_walk) { struct dentry *dentry = path->dentry; struct dentry *expiring; expiring = autofs_lookup_expiring(dentry, rcu_walk); if (IS_ERR(expiring)) return PTR_ERR(expiring); if (!expiring) return autofs_expire_wait(path, rcu_walk); else { const struct path this = { .mnt = path->mnt, .dentry = expiring }; /* * If we are racing with expire the request might not * be quite complete, but the directory has been removed * so it must have been successful, just wait for it. */ autofs_expire_wait(&this, 0); autofs_del_expiring(expiring); dput(expiring); } return 0; } static struct dentry *autofs_mountpoint_changed(struct path *path) { struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); /* If this is an indirect mount the dentry could have gone away * and a new one created. * * This is unusual and I can't remember the case for which it * was originally added now. But an example of how this can * happen is an autofs indirect mount that has the "browse" * option set and also has the "symlink" option in the autofs * map entry. In this case the daemon will remove the browse * directory and create a symlink as the mount leaving the * struct path stale. * * Another not so obvious case is when a mount in an autofs * indirect mount that uses the "nobrowse" option is being * expired at the same time as a path walk. If the mount has * been umounted but the mount point directory was seen before * becoming unhashed (during a lockless path walk), then when a stat * family system call is made the mount won't be re-mounted as * it should be. In this case the mount point that's been removed * (by the daemon) will be stale and a new mount point * dentry created. */ if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) { struct dentry *parent = dentry->d_parent; struct autofs_info *ino; struct dentry *new; new = d_lookup(parent, &dentry->d_name); if (!new) return NULL; ino = autofs_dentry_ino(new); ino->last_used = jiffies; dput(path->dentry); path->dentry = new; } return path->dentry; } static struct vfsmount *autofs_d_automount(struct path *path) { struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); int status; pr_debug("dentry=%p %pd\n", dentry, dentry); /* The daemon never triggers a mount. */ if (autofs_oz_mode(sbi)) return NULL; /* * If an expire request is pending everyone must wait. * If the expire fails we're still mounted so continue * the follow and return. A return of -EAGAIN (which only * happens with indirect mounts) means the expire completed * and the directory was removed, so just go ahead and try * the mount. */ status = do_expire_wait(path, 0); if (status && status != -EAGAIN) return NULL; /* Callback to the daemon to perform the mount or wait */ spin_lock(&sbi->fs_lock); if (ino->flags & AUTOFS_INF_PENDING) { spin_unlock(&sbi->fs_lock); status = autofs_mount_wait(path, 0); if (status) return ERR_PTR(status); goto done; } /* * If the dentry is a symlink it's equivalent to a directory * having path_is_mountpoint() true, so there's no need to call * back to the daemon. */ if (d_really_is_positive(dentry) && d_is_symlink(dentry)) { spin_unlock(&sbi->fs_lock); goto done; } if (!path_is_mountpoint(path)) { /* * It's possible that user space hasn't removed directories * after umounting a rootless multi-mount, although it * should. 
For v5 path_has_submounts() is sufficient to * handle this because the leaves of the directory tree under * the mount never trigger mounts themselves (they have an * autofs trigger mount mounted on them). But v4 pseudo direct * mounts do need the leaves to trigger mounts. In this case * we have no choice but to use the autofs_empty() check and * require user space behave. */ if (sbi->version > 4) { if (path_has_submounts(path)) { spin_unlock(&sbi->fs_lock); goto done; } } else { if (!autofs_empty(ino)) { spin_unlock(&sbi->fs_lock); goto done; } } ino->flags |= AUTOFS_INF_PENDING; spin_unlock(&sbi->fs_lock); status = autofs_mount_wait(path, 0); spin_lock(&sbi->fs_lock); ino->flags &= ~AUTOFS_INF_PENDING; if (status) { spin_unlock(&sbi->fs_lock); return ERR_PTR(status); } } spin_unlock(&sbi->fs_lock); done: /* Mount succeeded, check if we ended up with a new dentry */ dentry = autofs_mountpoint_changed(path); if (!dentry) return ERR_PTR(-ENOENT); return NULL; } static int autofs_d_manage(const struct path *path, bool rcu_walk) { struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); int status; pr_debug("dentry=%p %pd\n", dentry, dentry); /* The daemon never waits. */ if (autofs_oz_mode(sbi)) { if (!path_is_mountpoint(path)) return -EISDIR; return 0; } /* Wait for pending expires */ if (do_expire_wait(path, rcu_walk) == -ECHILD) return -ECHILD; /* * This dentry may be under construction so wait on mount * completion. */ status = autofs_mount_wait(path, rcu_walk); if (status) return status; if (rcu_walk) { /* We don't need fs_lock in rcu_walk mode, * just testing 'AUTOFS_INF_WANT_EXPIRE' is enough. * * We only return -EISDIR when certain this isn't * a mount-trap. */ struct inode *inode; if (ino->flags & AUTOFS_INF_WANT_EXPIRE) return 0; if (path_is_mountpoint(path)) return 0; inode = d_inode_rcu(dentry); if (inode && S_ISLNK(inode->i_mode)) return -EISDIR; if (!autofs_empty(ino)) return -EISDIR; return 0; } spin_lock(&sbi->fs_lock); /* * If the dentry has been selected for expire while we slept * on the lock then it might go away. We'll deal with that in * ->d_automount() and wait on a new mount if the expire * succeeds or return here if it doesn't (since there's no * mount to follow with a rootless multi-mount). */ if (!(ino->flags & AUTOFS_INF_EXPIRING)) { /* * Any needed mounting has been completed and the path * updated so check if this is a rootless multi-mount so * we can avoid needless calls ->d_automount() and avoid * an incorrect ELOOP error return. */ if ((!path_is_mountpoint(path) && !autofs_empty(ino)) || (d_really_is_positive(dentry) && d_is_symlink(dentry))) status = -EISDIR; } spin_unlock(&sbi->fs_lock); return status; } /* Lookups in the root directory */ static struct dentry *autofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct autofs_sb_info *sbi; struct autofs_info *ino; struct dentry *active; pr_debug("name = %pd\n", dentry); /* File name too long to exist */ if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); sbi = autofs_sbi(dir->i_sb); pr_debug("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d\n", current->pid, task_pgrp_nr(current), sbi->flags & AUTOFS_SBI_CATATONIC, autofs_oz_mode(sbi)); active = autofs_lookup_active(dentry); if (active) return active; else { /* * A dentry that is not within the root can never trigger a * mount operation, unless the directory already exists, so we * can return fail immediately. 
The daemon however does need * to create directories within the file system. */ if (!autofs_oz_mode(sbi) && !IS_ROOT(dentry->d_parent)) return ERR_PTR(-ENOENT); ino = autofs_new_ino(sbi); if (!ino) return ERR_PTR(-ENOMEM); spin_lock(&sbi->lookup_lock); spin_lock(&dentry->d_lock); /* Mark entries in the root as mount triggers */ if (IS_ROOT(dentry->d_parent) && autofs_type_indirect(sbi->type)) __managed_dentry_set_managed(dentry); dentry->d_fsdata = ino; ino->dentry = dentry; list_add(&ino->active, &sbi->active_list); spin_unlock(&sbi->lookup_lock); spin_unlock(&dentry->d_lock); } return NULL; } static int autofs_dir_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { if (mask & MAY_WRITE) { struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb); if (!autofs_oz_mode(sbi)) return -EACCES; /* autofs_oz_mode() needs to allow path walks when the * autofs mount is catatonic but the state of an autofs * file system needs to be preserved over restarts. */ if (sbi->flags & AUTOFS_SBI_CATATONIC) return -EACCES; } return generic_permission(idmap, inode, mask); } static int autofs_dir_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; struct inode *inode; size_t size = strlen(symname); char *cp; pr_debug("%s <- %pd\n", symname, dentry); BUG_ON(!ino); autofs_clean_ino(ino); autofs_del_active(dentry); cp = kmalloc(size + 1, GFP_KERNEL); if (!cp) return -ENOMEM; strcpy(cp, symname); inode = autofs_get_inode(dir->i_sb, S_IFLNK | 0555); if (!inode) { kfree(cp); return -ENOMEM; } inode->i_private = cp; inode->i_size = size; d_add(dentry, inode); dget(dentry); p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count++; inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); return 0; } /* * NOTE! * * Normal filesystems would do a "d_delete()" to tell the VFS dcache * that the file no longer exists. However, doing that means that the * VFS layer can turn the dentry into a negative dentry. We don't want * this, because the unlink is probably the result of an expire. * We simply d_drop it and add it to an expiring list in the super block, * which allows the dentry lookup to check for an incomplete expire. * * If a process is blocked on the dentry waiting for the expire to finish, * it will invalidate the dentry and try to mount with a new one. * * Also see autofs_dir_rmdir(). */ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry) { struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count--; dput(ino->dentry); d_inode(dentry)->i_size = 0; clear_nlink(d_inode(dentry)); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); spin_lock(&sbi->lookup_lock); __autofs_add_expiring(dentry); d_drop(dentry); spin_unlock(&sbi->lookup_lock); return 0; } /* * Version 4 of autofs provides a pseudo direct mount implementation * that relies on directories at the leaves of a directory tree under * an indirect mount to trigger mounts. To allow for this we need to * set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves * of the directory tree. There is no need to clear the automount flag * following a mount or restore it after an expire because these mounts * are always covered. However, it is necessary to ensure that these * flags are clear on non-empty directories to avoid unnecessary calls * during path walks. 
*/ static void autofs_set_leaf_automount_flags(struct dentry *dentry) { struct dentry *parent; /* root and dentrys in the root are already handled */ if (IS_ROOT(dentry->d_parent)) return; managed_dentry_set_managed(dentry); parent = dentry->d_parent; /* only consider parents below dentrys in the root */ if (IS_ROOT(parent->d_parent)) return; managed_dentry_clear_managed(parent); } static void autofs_clear_leaf_automount_flags(struct dentry *dentry) { struct dentry *parent; /* flags for dentrys in the root are handled elsewhere */ if (IS_ROOT(dentry->d_parent)) return; managed_dentry_clear_managed(dentry); parent = dentry->d_parent; /* only consider parents below dentrys in the root */ if (IS_ROOT(parent->d_parent)) return; if (autofs_dentry_ino(parent)->count == 2) managed_dentry_set_managed(parent); } static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry) { struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; pr_debug("dentry %p, removing %pd\n", dentry, dentry); if (ino->count != 1) return -ENOTEMPTY; spin_lock(&sbi->lookup_lock); __autofs_add_expiring(dentry); d_drop(dentry); spin_unlock(&sbi->lookup_lock); if (sbi->version < 5) autofs_clear_leaf_automount_flags(dentry); p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count--; dput(ino->dentry); d_inode(dentry)->i_size = 0; clear_nlink(d_inode(dentry)); if (dir->i_nlink) drop_nlink(dir); return 0; } static struct dentry *autofs_dir_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; struct inode *inode; pr_debug("dentry %p, creating %pd\n", dentry, dentry); BUG_ON(!ino); autofs_clean_ino(ino); autofs_del_active(dentry); inode = autofs_get_inode(dir->i_sb, S_IFDIR | mode); if (!inode) return ERR_PTR(-ENOMEM); d_add(dentry, inode); if (sbi->version < 5) autofs_set_leaf_automount_flags(dentry); dget(dentry); p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count++; inc_nlink(dir); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); return NULL; } /* Get/set timeout ioctl() operation */ #ifdef CONFIG_COMPAT static inline int autofs_compat_get_set_timeout(struct autofs_sb_info *sbi, compat_ulong_t __user *p) { unsigned long ntimeout; int rv; rv = get_user(ntimeout, p); if (rv) goto error; rv = put_user(sbi->exp_timeout/HZ, p); if (rv) goto error; if (ntimeout > UINT_MAX/HZ) sbi->exp_timeout = 0; else sbi->exp_timeout = ntimeout * HZ; return 0; error: return rv; } #endif static inline int autofs_get_set_timeout(struct autofs_sb_info *sbi, unsigned long __user *p) { unsigned long ntimeout; int rv; rv = get_user(ntimeout, p); if (rv) goto error; rv = put_user(sbi->exp_timeout/HZ, p); if (rv) goto error; if (ntimeout > ULONG_MAX/HZ) sbi->exp_timeout = 0; else sbi->exp_timeout = ntimeout * HZ; return 0; error: return rv; } /* Return protocol version */ static inline int autofs_get_protover(struct autofs_sb_info *sbi, int __user *p) { return put_user(sbi->version, p); } /* Return protocol sub version */ static inline int autofs_get_protosubver(struct autofs_sb_info *sbi, int __user *p) { return put_user(sbi->sub_version, p); } /* * Tells the daemon whether it can umount the autofs mount. 
*/ static inline int autofs_ask_umount(struct vfsmount *mnt, int __user *p) { int status = 0; if (may_umount(mnt)) status = 1; pr_debug("may umount %d\n", status); status = put_user(status, p); return status; } /* Identify autofs_dentries - this is so we can tell if there's * an extra dentry refcount or not. We only hold a refcount on the * dentry if it's non-negative (i.e., d_inode != NULL) */ int is_autofs_dentry(struct dentry *dentry) { return dentry && d_really_is_positive(dentry) && dentry->d_op == &autofs_dentry_operations && dentry->d_fsdata != NULL; } /* * ioctl()s on the root directory are the chief method for the daemon to * generate kernel reactions. */ static int autofs_root_ioctl_unlocked(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb); void __user *p = (void __user *)arg; pr_debug("cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u\n", cmd, arg, sbi, task_pgrp_nr(current)); if (_IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) || _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT) return -ENOTTY; if (!autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { case AUTOFS_IOC_READY: /* Wait queue: go ahead and retry */ return autofs_wait_release(sbi, (autofs_wqt_t) arg, 0); case AUTOFS_IOC_FAIL: /* Wait queue: fail with ENOENT */ return autofs_wait_release(sbi, (autofs_wqt_t) arg, -ENOENT); case AUTOFS_IOC_CATATONIC: /* Enter catatonic mode (daemon shutdown) */ autofs_catatonic_mode(sbi); return 0; case AUTOFS_IOC_PROTOVER: /* Get protocol version */ return autofs_get_protover(sbi, p); case AUTOFS_IOC_PROTOSUBVER: /* Get protocol sub version */ return autofs_get_protosubver(sbi, p); case AUTOFS_IOC_SETTIMEOUT: return autofs_get_set_timeout(sbi, p); #ifdef CONFIG_COMPAT case AUTOFS_IOC_SETTIMEOUT32: return autofs_compat_get_set_timeout(sbi, p); #endif case AUTOFS_IOC_ASKUMOUNT: return autofs_ask_umount(filp->f_path.mnt, p); /* return a single thing to expire */ case AUTOFS_IOC_EXPIRE: return autofs_expire_run(inode->i_sb, filp->f_path.mnt, sbi, p); /* same as above, but can send multiple expires through pipe */ case AUTOFS_IOC_EXPIRE_MULTI: return autofs_expire_multi(inode->i_sb, filp->f_path.mnt, sbi, p); default: return -EINVAL; } } static long autofs_root_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); return autofs_root_ioctl_unlocked(inode, filp, cmd, arg); } #ifdef CONFIG_COMPAT static long autofs_root_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); int ret; if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL) ret = autofs_root_ioctl_unlocked(inode, filp, cmd, arg); else ret = autofs_root_ioctl_unlocked(inode, filp, cmd, (unsigned long) compat_ptr(arg)); return ret; } #endif
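/*
 * Editor's example -- a userspace sketch, not part of the module above,
 * illustrating the read-modify-write contract of autofs_get_set_timeout():
 * AUTOFS_IOC_SETTIMEOUT first reads the new timeout from the buffer, then
 * writes the previous timeout (in seconds) back through the same pointer.
 * The mountpoint path is an assumption, and the caller must be the autofs
 * daemon or hold CAP_SYS_ADMIN, as enforced by autofs_root_ioctl_unlocked().
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/auto_fs.h>

int main(void)
{
	unsigned long timeout = 300;	/* new expire timeout, in seconds */
	int fd = open("/mnt/autofs", O_RDONLY);	/* hypothetical mount */

	if (fd < 0)
		return 1;
	if (ioctl(fd, AUTOFS_IOC_SETTIMEOUT, &timeout) == 0)
		printf("previous timeout: %lu seconds\n", timeout);
	close(fd);
	return 0;
}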
// SPDX-License-Identifier: GPL-2.0-only /* * Kernel module to match various things tied to sockets associated with * locally generated outgoing packets. * * (C) 2000 Marc Boucher <marc@mbsi.ca> * * Copyright © CC Computer Consultants GmbH, 2007 - 2008 */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/file.h> #include <linux/cred.h> #include <net/sock.h> #include <net/inet_sock.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_owner.h> static int owner_check(const struct xt_mtchk_param *par) { struct xt_owner_match_info *info = par->matchinfo; struct net *net = par->net; if (info->match & ~XT_OWNER_MASK) return -EINVAL; /* Only allow the common case where the userns of the writer * matches the userns of the network namespace. */ if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) && (current_user_ns() != net->user_ns)) return -EINVAL; /* Ensure the uids are valid */ if (info->match & XT_OWNER_UID) { kuid_t uid_min = make_kuid(net->user_ns, info->uid_min); kuid_t uid_max = make_kuid(net->user_ns, info->uid_max); if (!uid_valid(uid_min) || !uid_valid(uid_max) || (info->uid_max < info->uid_min) || uid_lt(uid_max, uid_min)) { return -EINVAL; } } /* Ensure the gids are valid */ if (info->match & XT_OWNER_GID) { kgid_t gid_min = make_kgid(net->user_ns, info->gid_min); kgid_t gid_max = make_kgid(net->user_ns, info->gid_max); if (!gid_valid(gid_min) || !gid_valid(gid_max) || (info->gid_max < info->gid_min) || gid_lt(gid_max, gid_min)) { return -EINVAL; } } return 0; } static bool owner_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_owner_match_info *info = par->matchinfo; const struct file *filp; struct sock *sk = skb_to_full_sk(skb); struct net *net = xt_net(par); if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk))) return (info->match ^ info->invert) == 0; else if (info->match & info->invert & XT_OWNER_SOCKET) /* * Socket exists but user wanted ! --socket-exists. * (Single ampersands intended.) */ return false; read_lock_bh(&sk->sk_callback_lock); filp = sk->sk_socket ? 
sk->sk_socket->file : NULL; if (filp == NULL) { read_unlock_bh(&sk->sk_callback_lock); return ((info->match ^ info->invert) & (XT_OWNER_UID | XT_OWNER_GID)) == 0; } if (info->match & XT_OWNER_UID) { kuid_t uid_min = make_kuid(net->user_ns, info->uid_min); kuid_t uid_max = make_kuid(net->user_ns, info->uid_max); if ((uid_gte(filp->f_cred->fsuid, uid_min) && uid_lte(filp->f_cred->fsuid, uid_max)) ^ !(info->invert & XT_OWNER_UID)) { read_unlock_bh(&sk->sk_callback_lock); return false; } } if (info->match & XT_OWNER_GID) { unsigned int i, match = false; kgid_t gid_min = make_kgid(net->user_ns, info->gid_min); kgid_t gid_max = make_kgid(net->user_ns, info->gid_max); struct group_info *gi = filp->f_cred->group_info; if (gid_gte(filp->f_cred->fsgid, gid_min) && gid_lte(filp->f_cred->fsgid, gid_max)) match = true; if (!match && (info->match & XT_OWNER_SUPPL_GROUPS) && gi) { for (i = 0; i < gi->ngroups; ++i) { kgid_t group = gi->gid[i]; if (gid_gte(group, gid_min) && gid_lte(group, gid_max)) { match = true; break; } } } if (match ^ !(info->invert & XT_OWNER_GID)) { read_unlock_bh(&sk->sk_callback_lock); return false; } } read_unlock_bh(&sk->sk_callback_lock); return true; } static struct xt_match owner_mt_reg __read_mostly = { .name = "owner", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = owner_check, .match = owner_mt, .matchsize = sizeof(struct xt_owner_match_info), .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_POST_ROUTING), .me = THIS_MODULE, }; static int __init owner_mt_init(void) { return xt_register_match(&owner_mt_reg); } static void __exit owner_mt_exit(void) { xt_unregister_match(&owner_mt_reg); } module_init(owner_mt_init); module_exit(owner_mt_exit); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: socket owner matching"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_owner"); MODULE_ALIAS("ip6t_owner");
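/*
 * Editor's example -- a standalone model, not kernel code, of the
 * match/invert bit logic in owner_mt() above. When a packet carries no
 * usable socket, owner_mt() returns (info->match ^ info->invert) == 0:
 * every selected test "fails", so the packet matches only if each selected
 * bit is also inverted. The flag values mirror linux/netfilter/xt_owner.h.
 */
#include <assert.h>
#include <stdint.h>

#define XT_OWNER_UID (1 << 0)
#define XT_OWNER_GID (1 << 1)

int main(void)
{
	uint8_t match = XT_OWNER_UID;	/* rule uses --uid-owner */

	/* No socket and no inversion: the rule cannot match. */
	assert((match ^ 0) != 0);
	/* No socket with "! --uid-owner": the rule trivially matches. */
	assert((match ^ XT_OWNER_UID) == 0);
	return 0;
}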
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};

static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that it is of the correct type, and that the counters do not
 * become negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;
	struct page *page;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	rcu_read_lock();
	for_each_page_ext(page, pgcnt, page_ext, iter) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
	}
	rcu_read_unlock();
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that it is of the correct type, and that it is not being
 * mapped with a different type by a different process.
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;
	struct page *page;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	rcu_read_lock();
	for_each_page_ext(page, pgcnt, page_ext, iter) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
	}
	rcu_read_unlock();
}

/*
 * The page is on the free list, or is being allocated: verify that the
 * counters are zeroes, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;

	BUG_ON(PageSlab(page));

	rcu_read_lock();
	for_each_page_ext(page, 1 << order, page_ext, iter) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
	}
	rcu_read_unlock();
}

void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

/* Whether the swap entry cached writable information */
static inline bool swap_cached_writable(swp_entry_t entry)
{
	return is_writable_device_private_entry(entry) ||
	       is_writable_migration_entry(entry);
}

static inline void page_table_check_pte_flags(pte_t pte)
{
	if (pte_present(pte) && pte_uffd_wp(pte))
		WARN_ON_ONCE(pte_write(pte));
	else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
		WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
}

void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
		unsigned int nr)
{
	unsigned int i;

	if (&init_mm == mm)
		return;

	page_table_check_pte_flags(pte);

	for (i = 0; i < nr; i++)
		__page_table_check_pte_clear(mm, ptep_get(ptep + i));
	if (pte_user_accessible_page(pte))
		page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);

static inline void page_table_check_pmd_flags(pmd_t pmd)
{
	if (pmd_present(pmd) && pmd_uffd_wp(pmd))
		WARN_ON_ONCE(pmd_write(pmd));
	else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
}

void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
		unsigned int nr)
{
	unsigned long stride = PMD_SIZE >> PAGE_SHIFT;
	unsigned int i;

	if (&init_mm == mm)
		return;

	page_table_check_pmd_flags(pmd);

	for (i = 0; i < nr; i++)
		__page_table_check_pmd_clear(mm, *(pmdp + i));
	if (pmd_user_accessible_page(pmd))
		page_table_check_set(pmd_pfn(pmd), stride * nr, pmd_write(pmd));
}
EXPORT_SYMBOL(__page_table_check_pmds_set);

void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
		unsigned int nr)
{
	unsigned long stride = PUD_SIZE >> PAGE_SHIFT;
	unsigned int i;

	if (&init_mm == mm)
		return;

	for (i = 0; i < nr; i++)
		__page_table_check_pud_clear(mm, *(pudp + i));
	if (pud_user_accessible_page(pud))
		page_table_check_set(pud_pfn(pud), stride * nr, pud_write(pud));
}
EXPORT_SYMBOL(__page_table_check_puds_set);

void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}
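
/*
 * Illustrative summary (editor's addition, not part of the original file)
 * of the invariants the counters above enforce, per base page:
 *
 *   - a page is never mapped as anonymous and file-backed at the same time
 *     (the "other" counter must read zero on every set/clear);
 *   - an anonymous page may be mapped writable by at most one page table
 *     entry (anon_map_count > 1 is only tolerated when rw is false);
 *   - file-backed pages may be mapped any number of times.
 *
 * E.g. two processes mapping the same anon page read-only is fine
 * (anon_map_count == 2, rw == false), while a second writable mapping
 * trips BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw).
 */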
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MROUTE6_H
#define __LINUX_MROUTE6_H

#include <linux/pim.h>
#include <linux/skbuff.h>	/* for struct sk_buff_head */
#include <net/net_namespace.h>
#include <uapi/linux/mroute6.h>
#include <linux/mroute_base.h>
#include <linux/sockptr.h>
#include <net/fib_rules.h>

#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
{
	return (opt >= MRT6_BASE) && (opt <= MRT6_MAX);
}
#else
static inline int ip6_mroute_opt(int opt)
{
	return 0;
}
#endif

struct sock;

#ifdef CONFIG_IPV6_MROUTE
extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
extern int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb);
extern void ip6_mr_cleanup(void);
int ip6mr_ioctl(struct sock *sk, int cmd, void *arg);
#else
static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
		sockptr_t optval, unsigned int optlen)
{
	return -ENOPROTOOPT;
}

static inline int ip6_mroute_getsockopt(struct sock *sock, int optname,
		sockptr_t optval, sockptr_t optlen)
{
	return -ENOPROTOOPT;
}

static inline int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
{
	return -ENOIOCTLCMD;
}

static inline int ip6_mr_init(void)
{
	return 0;
}

static inline int ip6_mr_output(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	return ip6_output(net, sk, skb);
}

static inline void ip6_mr_cleanup(void)
{
	return;
}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
bool ip6mr_rule_default(const struct fib_rule *rule);
#else
static inline bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return true;
}
#endif

#define VIFF_STATIC 0x8000

struct mfc6_cache_cmp_arg {
	struct in6_addr mf6c_mcastgrp;
	struct in6_addr mf6c_origin;
};

struct mfc6_cache {
	struct mr_mfc _c;
	union {
		struct {
			struct in6_addr mf6c_mcastgrp;
			struct in6_addr mf6c_origin;
		};
		struct mfc6_cache_cmp_arg cmparg;
	};
};

#define MFC_ASSERT_THRESH (3*HZ)	/* Maximal freq. of asserts */

struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
			   struct rtmsg *rtm, u32 portid);

#ifdef CONFIG_IPV6_MROUTE
bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
extern int ip6mr_sk_done(struct sock *sk);
static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd,
				 void __user *arg)
{
	switch (cmd) {
	/* These userspace buffers will be consumed by ip6mr_ioctl() */
	case SIOCGETMIFCNT_IN6: {
		struct sioc_mif_req6 buffer;

		return sock_ioctl_inout(sk, cmd, arg, &buffer,
					sizeof(buffer));
	}
	case SIOCGETSGCNT_IN6: {
		struct sioc_sg_req6 buffer;

		return sock_ioctl_inout(sk, cmd, arg, &buffer,
					sizeof(buffer));
	}
	}

	return 1;
}
#else
static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
	return false;
}

static inline int ip6mr_sk_done(struct sock *sk)
{
	return 0;
}

static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd,
				 void __user *arg)
{
	return 1;
}
#endif
#endif
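
/*
 * Illustrative note (editor's addition, not part of the original file):
 * the anonymous struct/cmparg union in struct mfc6_cache lets the
 * (mcastgrp, origin) pair double as a single lookup key, e.g. (assuming a
 * hash-table style lookup, as the mroute cache code performs):
 *
 *	struct mfc6_cache_cmp_arg arg = {
 *		.mf6c_mcastgrp = *grp,
 *		.mf6c_origin   = *src,
 *	};
 *	// compare &arg against c->cmparg, which aliases the named fields
 */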
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/media/radio/si470x/radio-si470x-common.c
 *
 * Driver for radios with Silicon Labs Si470x FM Radio Receivers
 *
 * Copyright (c) 2009 Tobias Lorenz <tobias.lorenz@gmx.net>
 * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com>
 */

/*
 * History:
 * 2008-01-12	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Version 1.0.0
 *		- First working version
 * 2008-01-13	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Version 1.0.1
 *		- Improved error handling, every function now returns errno
 *		- Improved multi user access (start/mute/stop)
 *		- Channel doesn't get lost anymore after start/mute/stop
 *		- RDS support added (polling mode via interrupt EP 1)
 *		- marked default module parameters with *value*
 *		- switched from bit structs to bit masks
 *		- header file cleaned and integrated
 * 2008-01-14	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Version 1.0.2
 *		- hex values are now lower case
 *		- commented USB ID for ADS/Tech moved on todo list
 *		- blacklisted si470x in hid-quirks.c
 *		- rds buffer handling functions integrated into *_work, *_read
 *		- rds_command in si470x_poll exchanged against simple retval
 *		- check for firmware version 15
 *		- code order and prototypes still remain the same
 *		- spacing and bottom of band codes remain the same
 * 2008-01-16	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Version 1.0.3
 *		- code reordered to avoid function prototypes
 *		- switch/case defaults are now more user-friendly
 *		- unified comment style
 *		- applied all checkpatch.pl v1.12 suggestions
 *		  except the warning about the too long lines with bit comments
 *		- renamed FMRADIO to RADIO to cut line length (checkpatch.pl)
 * 2008-01-22	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Version 1.0.4
 *		- avoid possible locking when doing copy_to_user which may sleep
 *		- RDS is automatically activated on read now
 *		- code cleaned of unnecessary rds_commands
 *		- USB Vendor/Product ID for ADS/Tech FM Radio Receiver verified
 *		  (thanks to Guillaume RAMOUSSE)
 * 2008-01-27	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Version 1.0.5
 *		- number of seek_retries changed to tune_timeout
 *		- fixed problem with incomplete tune operations by own buffers
 *		- optimization of variables and printf types
 *		- improved error logging
 * 2008-01-31	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Oliver Neukum <oliver@neukum.org>
 *		Version 1.0.6
 *		- fixed coverity checker warnings in *_usb_driver_disconnect
 *		- probe()/open() race by correct ordering in probe()
 *		- DMA coherency rules by separate allocation of all buffers
 *		- use of endianness macros
 *		- abuse of spinlock, replaced by mutex
 *		- racy handling of timer in disconnect,
 *		  replaced by delayed_work
 *		- racy interruptible_sleep_on(),
 *		  replaced with wait_event_interruptible()
 *		- handle signals in read()
 * 2008-02-08	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Oliver Neukum <oliver@neukum.org>
 *		Version 1.0.7
 *		- usb autosuspend support
 *		- unplugging fixed
 * 2008-05-07	Tobias Lorenz <tobias.lorenz@gmx.net>
 *		Version 1.0.8
 *		- hardware frequency seek support
 *		- afc indication
 *		- more safety checks, let si470x_get_freq return errno
 *		- vidioc behavior corrected according to v4l2 spec
 * 2008-10-20	Alexey Klimov <klimov.linux@gmail.com>
 *		- add support for KWorld USB FM Radio FM700
 *		- blacklisted KWorld radio in hid-core.c and hid-ids.h
 * 2008-12-03	Mark Lord <mlord@pobox.com>
 *		- add support for DealExtreme USB Radio
 * 2009-01-31	Bob Ross <pigiron@gmx.com>
 *		- correction of stereo detection/setting
 *		- correction of signal strength indicator scaling
 * 2009-01-31	Rick Bronson <rick@efn.org>
 *		Tobias Lorenz <tobias.lorenz@gmx.net>
 *		- add LED status output
 *		- get HW/SW version from scratchpad
 * 2009-06-16	Edouard Lafargue <edouard@lafargue.name>
 *		Version 1.0.10
 *		- add support for interrupt mode for RDS endpoint,
 *		  instead of polling.
 *		  Improves RDS reception significantly
 */

/* kernel includes */
#include "radio-si470x.h"

/**************************************************************************
 * Module Parameters
 **************************************************************************/

/* Spacing (kHz) */
/* 0: 200 kHz (USA, Australia) */
/* 1: 100 kHz (Europe, Japan) */
/* 2:  50 kHz */
static unsigned short space = 2;
module_param(space, ushort, 0444);
MODULE_PARM_DESC(space, "Spacing: 0=200kHz 1=100kHz *2=50kHz*");

/* De-emphasis */
/* 0: 75 us (USA) */
/* 1: 50 us (Europe, Australia, Japan) */
static unsigned short de = 1;
module_param(de, ushort, 0444);
MODULE_PARM_DESC(de, "De-emphasis: 0=75us *1=50us*");

/* Tune timeout */
static unsigned int tune_timeout = 3000;
module_param(tune_timeout, uint, 0644);
MODULE_PARM_DESC(tune_timeout, "Tune timeout: *3000*");

/* Seek timeout */
static unsigned int seek_timeout = 5000;
module_param(seek_timeout, uint, 0644);
MODULE_PARM_DESC(seek_timeout, "Seek timeout: *5000*");

static const struct v4l2_frequency_band bands[] = {
	{
		.type = V4L2_TUNER_RADIO,
		.index = 0,
		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
			    V4L2_TUNER_CAP_FREQ_BANDS |
			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
			    V4L2_TUNER_CAP_HWSEEK_WRAP,
		.rangelow   =  87500 * 16,
		.rangehigh  = 108000 * 16,
		.modulation = V4L2_BAND_MODULATION_FM,
	},
	{
		.type = V4L2_TUNER_RADIO,
		.index = 1,
		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
			    V4L2_TUNER_CAP_FREQ_BANDS |
			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
			    V4L2_TUNER_CAP_HWSEEK_WRAP,
		.rangelow   =  76000 * 16,
		.rangehigh  = 108000 * 16,
		.modulation = V4L2_BAND_MODULATION_FM,
	},
	{
		.type = V4L2_TUNER_RADIO,
		.index = 2,
		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
			    V4L2_TUNER_CAP_FREQ_BANDS |
			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
			    V4L2_TUNER_CAP_HWSEEK_WRAP,
		.rangelow   =  76000 * 16,
		.rangehigh  =  90000 * 16,
		.modulation = V4L2_BAND_MODULATION_FM,
	},
};

/**************************************************************************
 * Generic Functions
 **************************************************************************/

/*
 * si470x_set_band - set the band
 */
static int si470x_set_band(struct si470x_device *radio, int band)
{
	if (radio->band == band)
		return 0;

	radio->band = band;
	radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_BAND;
	radio->registers[SYSCONFIG2] |= radio->band << 6;
	return radio->set_register(radio, SYSCONFIG2);
}

/*
 * si470x_set_chan - set the channel
 */
static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
{
	int retval;
	unsigned long time_left;
	bool timed_out = false;

	retval = radio->get_register(radio, POWERCFG);
	if (retval)
		return retval;

	if ((radio->registers[POWERCFG] & (POWERCFG_ENABLE|POWERCFG_DMUTE))
		!= (POWERCFG_ENABLE|POWERCFG_DMUTE)) {
		return 0;
	}

	/* start tuning */
	radio->registers[CHANNEL] &= ~CHANNEL_CHAN;
	radio->registers[CHANNEL] |= CHANNEL_TUNE | chan;
	retval = radio->set_register(radio, CHANNEL);
	if (retval < 0)
		goto done;

	/* wait till tune operation has completed */
	reinit_completion(&radio->completion);
	time_left = wait_for_completion_timeout(&radio->completion,
						msecs_to_jiffies(tune_timeout));
	if (time_left == 0)
		timed_out = true;

	if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
		dev_warn(&radio->videodev.dev, "tune does not complete\n");
	if (timed_out)
		dev_warn(&radio->videodev.dev,
			 "tune timed out after %u ms\n", tune_timeout);

	/* stop tuning */
	radio->registers[CHANNEL] &= ~CHANNEL_TUNE;
	retval = radio->set_register(radio, CHANNEL);

done:
	return retval;
}

/*
 * si470x_get_step - get channel spacing
 */
static unsigned int si470x_get_step(struct si470x_device *radio)
{
	/* Spacing (kHz) */
	switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_SPACE) >> 4) {
	/* 0: 200 kHz (USA, Australia) */
	case 0:
		return 200 * 16;
	/* 1: 100 kHz (Europe, Japan) */
	case 1:
		return 100 * 16;
	/* 2:  50 kHz */
	default:
		return 50 * 16;
	}
}

/*
 * si470x_get_freq - get the frequency
 */
static int si470x_get_freq(struct si470x_device *radio, unsigned int *freq)
{
	int chan, retval;

	/* read channel */
	retval = radio->get_register(radio, READCHAN);
	chan = radio->registers[READCHAN] & READCHAN_READCHAN;

	/* Frequency (MHz) = Spacing (kHz) x Channel + Bottom of Band (MHz) */
	*freq = chan * si470x_get_step(radio) + bands[radio->band].rangelow;

	return retval;
}

/*
 * si470x_set_freq - set the frequency
 */
int si470x_set_freq(struct si470x_device *radio, unsigned int freq)
{
	unsigned short chan;

	freq = clamp(freq, bands[radio->band].rangelow,
			   bands[radio->band].rangehigh);

	/* Chan = [ Freq (MHz) - Bottom of Band (MHz) ] / Spacing (kHz) */
	chan = (freq - bands[radio->band].rangelow) / si470x_get_step(radio);

	return si470x_set_chan(radio, chan);
}
EXPORT_SYMBOL_GPL(si470x_set_freq);

/*
 * si470x_set_seek - set seek
 */
static int si470x_set_seek(struct si470x_device *radio,
			   const struct v4l2_hw_freq_seek *seek)
{
	int band, retval;
	unsigned int freq;
	bool timed_out = false;
	unsigned long time_left;

	/* set band */
	if (seek->rangelow || seek->rangehigh) {
		for (band = 0; band < ARRAY_SIZE(bands); band++) {
			if (bands[band].rangelow  == seek->rangelow &&
			    bands[band].rangehigh == seek->rangehigh)
				break;
		}
		if (band == ARRAY_SIZE(bands))
			return -EINVAL; /* No matching band found */
	} else
		band = 1; /* If nothing is specified seek 76 - 108 MHz */

	if (radio->band != band) {
		retval = si470x_get_freq(radio, &freq);
		if (retval)
			return retval;
		retval = si470x_set_band(radio, band);
		if (retval)
			return retval;
		retval = si470x_set_freq(radio, freq);
		if (retval)
			return retval;
	}

	/* start seeking */
	radio->registers[POWERCFG] |= POWERCFG_SEEK;
	if (seek->wrap_around)
		radio->registers[POWERCFG] &= ~POWERCFG_SKMODE;
	else
		radio->registers[POWERCFG] |= POWERCFG_SKMODE;
	if (seek->seek_upward)
		radio->registers[POWERCFG] |= POWERCFG_SEEKUP;
	else
		radio->registers[POWERCFG] &= ~POWERCFG_SEEKUP;
	retval = radio->set_register(radio, POWERCFG);
	if (retval < 0)
		return retval;

	/* wait till seek operation has completed */
	reinit_completion(&radio->completion);
	time_left = wait_for_completion_timeout(&radio->completion,
						msecs_to_jiffies(seek_timeout));
	if (time_left == 0)
		timed_out = true;

	if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
		dev_warn(&radio->videodev.dev, "seek does not complete\n");
	if (radio->registers[STATUSRSSI] & STATUSRSSI_SF)
		dev_warn(&radio->videodev.dev,
			 "seek failed / band limit reached\n");

	/* stop seeking */
	radio->registers[POWERCFG] &= ~POWERCFG_SEEK;
	retval = radio->set_register(radio, POWERCFG);

	/* try again, if timed out */
	if (retval == 0 && timed_out)
		return -ENODATA;
	return retval;
}

/*
 * si470x_start - switch on radio
 */
int si470x_start(struct si470x_device *radio)
{
	int retval;

	/* powercfg */
	radio->registers[POWERCFG] =
		POWERCFG_DMUTE | POWERCFG_ENABLE | POWERCFG_RDSM;
	retval = radio->set_register(radio, POWERCFG);
	if (retval < 0)
		goto done;

	/* sysconfig 1 */
	radio->registers[SYSCONFIG1] |=
		SYSCONFIG1_RDSIEN | SYSCONFIG1_STCIEN | SYSCONFIG1_RDS;
	radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_GPIO2;
	radio->registers[SYSCONFIG1] |= SYSCONFIG1_GPIO2_INT;
	if (de)
		radio->registers[SYSCONFIG1] |= SYSCONFIG1_DE;
	retval = radio->set_register(radio, SYSCONFIG1);
	if (retval < 0)
		goto done;

	/* sysconfig 2 */
	radio->registers[SYSCONFIG2] =
		(0x1f  << 8) |				/* SEEKTH */
		((radio->band << 6) & SYSCONFIG2_BAND) |/* BAND */
		((space << 4) & SYSCONFIG2_SPACE) |	/* SPACE */
		15;					/* VOLUME (max) */
	retval = radio->set_register(radio, SYSCONFIG2);
	if (retval < 0)
		goto done;

	/* reset last channel */
	retval = si470x_set_chan(radio,
		radio->registers[CHANNEL] & CHANNEL_CHAN);

done:
	return retval;
}
EXPORT_SYMBOL_GPL(si470x_start);

/*
 * si470x_stop - switch off radio
 */
int si470x_stop(struct si470x_device *radio)
{
	int retval;

	/* sysconfig 1 */
	radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS;
	retval = radio->set_register(radio, SYSCONFIG1);
	if (retval < 0)
		goto done;

	/* powercfg */
	radio->registers[POWERCFG] &= ~POWERCFG_DMUTE;
	/* POWERCFG_ENABLE has to automatically go low */
	radio->registers[POWERCFG] |= POWERCFG_ENABLE | POWERCFG_DISABLE;
	retval = radio->set_register(radio, POWERCFG);

done:
	return retval;
}
EXPORT_SYMBOL_GPL(si470x_stop);

/*
 * si470x_rds_on - switch on rds reception
 */
static int si470x_rds_on(struct si470x_device *radio)
{
	int retval;

	/* sysconfig 1 */
	radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDS;
	retval = radio->set_register(radio, SYSCONFIG1);
	if (retval < 0)
		radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS;

	return retval;
}

/**************************************************************************
 * File Operations Interface
 **************************************************************************/

/*
 * si470x_fops_read - read RDS data
 */
static ssize_t si470x_fops_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct si470x_device *radio = video_drvdata(file);
	int retval = 0;
	unsigned int block_count = 0;

	/* switch on rds reception */
	if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
		si470x_rds_on(radio);

	/* block if no new data available */
	while (radio->wr_index == radio->rd_index) {
		if (file->f_flags & O_NONBLOCK) {
			retval = -EWOULDBLOCK;
			goto done;
		}
		if (wait_event_interruptible(radio->read_queue,
					     radio->wr_index != radio->rd_index) < 0) {
			retval = -EINTR;
			goto done;
		}
	}

	/* calculate block count from byte count */
	count /= 3;

	/* copy RDS blocks out of the internal buffer and into the user buffer */
	while (block_count < count) {
		if (radio->rd_index == radio->wr_index)
			break;

		/* always transfer complete RDS blocks */
		if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3))
			/* retval = -EFAULT; */
			break;

		/* increment and wrap read pointer */
		radio->rd_index += 3;
		if (radio->rd_index >= radio->buf_size)
			radio->rd_index = 0;

		/* increment counters */
		block_count++;
		buf += 3;
		retval += 3;
	}

done:
	return retval;
}

/*
 * si470x_fops_poll - poll RDS data
 */
static __poll_t si470x_fops_poll(struct file *file,
				 struct poll_table_struct *pts)
{
	struct si470x_device *radio = video_drvdata(file);
	__poll_t req_events = poll_requested_events(pts);
	__poll_t retval = v4l2_ctrl_poll(file, pts);

	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		/* switch on rds reception */
		if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
			si470x_rds_on(radio);

		poll_wait(file, &radio->read_queue, pts);

		if (radio->rd_index != radio->wr_index)
			retval |= EPOLLIN | EPOLLRDNORM;
	}

	return retval;
}

static int si470x_fops_open(struct file *file)
{
	struct si470x_device *radio = video_drvdata(file);

	return radio->fops_open(file);
}

/*
 * si470x_fops_release - file release
 */
static int si470x_fops_release(struct file *file)
{
	struct si470x_device *radio = video_drvdata(file);

	return radio->fops_release(file);
}

/*
 * si470x_fops - file operations interface
 */
static const struct v4l2_file_operations si470x_fops = {
	.owner			= THIS_MODULE,
	.read			= si470x_fops_read,
	.poll			= si470x_fops_poll,
	.unlocked_ioctl		= video_ioctl2,
	.open			= si470x_fops_open,
	.release		= si470x_fops_release,
};

/**************************************************************************
 * Video4Linux Interface
 **************************************************************************/

static int si470x_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct si470x_device *radio =
		container_of(ctrl->handler, struct si470x_device, hdl);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_VOLUME:
		radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_VOLUME;
		radio->registers[SYSCONFIG2] |= ctrl->val;
		return radio->set_register(radio, SYSCONFIG2);
	case V4L2_CID_AUDIO_MUTE:
		if (ctrl->val)
			radio->registers[POWERCFG] &= ~POWERCFG_DMUTE;
		else
			radio->registers[POWERCFG] |= POWERCFG_DMUTE;
		return radio->set_register(radio, POWERCFG);
	default:
		return -EINVAL;
	}
}

/*
 * si470x_vidioc_g_tuner - get tuner attributes
 */
static int si470x_vidioc_g_tuner(struct file *file, void *priv,
				 struct v4l2_tuner *tuner)
{
	struct si470x_device *radio = video_drvdata(file);
	int retval = 0;

	if (tuner->index != 0)
		return -EINVAL;

	if (!radio->status_rssi_auto_update) {
		retval = radio->get_register(radio, STATUSRSSI);
		if (retval < 0)
			return retval;
	}

	/* driver constants */
	strscpy(tuner->name, "FM", sizeof(tuner->name));
	tuner->type = V4L2_TUNER_RADIO;
	tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
			    V4L2_TUNER_CAP_HWSEEK_WRAP;
	tuner->rangelow  =  76 * FREQ_MUL;
	tuner->rangehigh = 108 * FREQ_MUL;

	/* stereo indicator == stereo (instead of mono) */
	if ((radio->registers[STATUSRSSI] & STATUSRSSI_ST) == 0)
		tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
	else
		tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
	/* If there is a reliable method of detecting an RDS channel,
	   then this code should check for that before setting this
	   RDS subchannel. */
	tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;

	/* mono/stereo selector */
	if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0)
		tuner->audmode = V4L2_TUNER_MODE_STEREO;
	else
		tuner->audmode = V4L2_TUNER_MODE_MONO;

	/* min is worst, max is best; signal:0..0xffff; rssi: 0..0xff */
	/* measured in units of dBµV in 1 dB increments (max at ~75 dBµV) */
	tuner->signal = (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI);
	/* the ideal factor is 0xffff/75 = 873.8 */
	tuner->signal = (tuner->signal * 873) + (8 * tuner->signal / 10);
	if (tuner->signal > 0xffff)
		tuner->signal = 0xffff;

	/* automatic frequency control: -1: freq too low, 1: freq too high */
	/* AFCRL does only indicate that freq. differs, not if too low/high */
	tuner->afc = (radio->registers[STATUSRSSI] & STATUSRSSI_AFCRL) ? 1 : 0;

	return retval;
}

/*
 * si470x_vidioc_s_tuner - set tuner attributes
 */
static int si470x_vidioc_s_tuner(struct file *file, void *priv,
				 const struct v4l2_tuner *tuner)
{
	struct si470x_device *radio = video_drvdata(file);

	if (tuner->index != 0)
		return -EINVAL;

	/* mono/stereo selector */
	switch (tuner->audmode) {
	case V4L2_TUNER_MODE_MONO:
		radio->registers[POWERCFG] |= POWERCFG_MONO;  /* force mono */
		break;
	case V4L2_TUNER_MODE_STEREO:
	default:
		radio->registers[POWERCFG] &= ~POWERCFG_MONO; /* try stereo */
		break;
	}

	return radio->set_register(radio, POWERCFG);
}

/*
 * si470x_vidioc_g_frequency - get tuner or modulator radio frequency
 */
static int si470x_vidioc_g_frequency(struct file *file, void *priv,
				     struct v4l2_frequency *freq)
{
	struct si470x_device *radio = video_drvdata(file);

	if (freq->tuner != 0)
		return -EINVAL;

	freq->type = V4L2_TUNER_RADIO;
	return si470x_get_freq(radio, &freq->frequency);
}

/*
 * si470x_vidioc_s_frequency - set tuner or modulator radio frequency
 */
static int si470x_vidioc_s_frequency(struct file *file, void *priv,
				     const struct v4l2_frequency *freq)
{
	struct si470x_device *radio = video_drvdata(file);
	int retval;

	if (freq->tuner != 0)
		return -EINVAL;

	if (freq->frequency < bands[radio->band].rangelow ||
	    freq->frequency > bands[radio->band].rangehigh) {
		/* Switch to band 1 which covers everything we support */
		retval = si470x_set_band(radio, 1);
		if (retval)
			return retval;
	}
	return si470x_set_freq(radio, freq->frequency);
}

/*
 * si470x_vidioc_s_hw_freq_seek - set hardware frequency seek
 */
static int si470x_vidioc_s_hw_freq_seek(struct file *file, void *priv,
					const struct v4l2_hw_freq_seek *seek)
{
	struct si470x_device *radio = video_drvdata(file);

	if (seek->tuner != 0)
		return -EINVAL;

	if (file->f_flags & O_NONBLOCK)
		return -EWOULDBLOCK;

	return si470x_set_seek(radio, seek);
}

/*
 * si470x_vidioc_enum_freq_bands - enumerate supported bands
 */
static int si470x_vidioc_enum_freq_bands(struct file *file, void *priv,
					 struct v4l2_frequency_band *band)
{
	if (band->tuner != 0)
		return -EINVAL;
	if (band->index >= ARRAY_SIZE(bands))
		return -EINVAL;
	*band = bands[band->index];
	return 0;
}

const struct v4l2_ctrl_ops si470x_ctrl_ops = {
	.s_ctrl = si470x_s_ctrl,
};
EXPORT_SYMBOL_GPL(si470x_ctrl_ops);

static int si470x_vidioc_querycap(struct file *file, void *priv,
				  struct v4l2_capability *capability)
{
	struct si470x_device *radio = video_drvdata(file);

	return radio->vidioc_querycap(file, priv, capability);
}

/*
 * si470x_ioctl_ops - video device ioctl operations
 */
static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
	.vidioc_querycap	= si470x_vidioc_querycap,
	.vidioc_g_tuner		= si470x_vidioc_g_tuner,
	.vidioc_s_tuner		= si470x_vidioc_s_tuner,
	.vidioc_g_frequency	= si470x_vidioc_g_frequency,
	.vidioc_s_frequency	= si470x_vidioc_s_frequency,
	.vidioc_s_hw_freq_seek	= si470x_vidioc_s_hw_freq_seek,
	.vidioc_enum_freq_bands = si470x_vidioc_enum_freq_bands,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};

/*
 * si470x_viddev_template - video device interface
 */
const struct video_device si470x_viddev_template = {
	.fops			= &si470x_fops,
	.name			= DRIVER_NAME,
	.release		= video_device_release_empty,
	.ioctl_ops		= &si470x_ioctl_ops,
};
EXPORT_SYMBOL_GPL(si470x_viddev_template);

MODULE_DESCRIPTION("Core radio driver for Si470x FM Radio Receivers");
MODULE_LICENSE("GPL");
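
/*
 * Illustrative worked example (editor's addition, not part of the original
 * file) of the channel/frequency math above. V4L2 radio frequencies here
 * are in units of 62.5 Hz (V4L2_TUNER_CAP_LOW), i.e. 16 counts per kHz,
 * which is why the band limits are written as e.g. 87500 * 16.
 *
 * With band 0 (87.5..108 MHz) and space = 1 (100 kHz):
 *   step = 100 * 16 = 1600 counts
 *   chan = (freq - rangelow) / step
 * so for 88.5 MHz, freq = 88500 * 16 counts and
 *   chan = (88500 - 87500) * 16 / 1600 = 10
 * si470x_get_freq() inverts this: 10 * 1600 + 87500 * 16 = 88500 kHz.
 */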
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "kexec_internal.h"

static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

#ifdef CONFIG_CRASH_DUMP
	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}
#endif

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;
	image->nr_segments = nr_segments;
	memcpy(image->segment, segments, nr_segments * sizeof(*segments));

#ifdef CONFIG_CRASH_DUMP
	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}
#endif

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	/*
	 * Because we write directly to the reserved memory region when loading
	 * crash kernels we need a serialization here to prevent multiple crash
	 * kernels from attempting to load simultaneously.
	 */
	if (!kexec_trylock())
		return -EBUSY;

#ifdef CONFIG_CRASH_DUMP
	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else
#endif
		dest_image = &kexec_image;

	if (nr_segments == 0) {
		/* Uninstall image */
		kimage_free(xchg(dest_image, NULL));
		ret = 0;
		goto out_unlock;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes.  Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		goto out_unlock;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

#ifdef CONFIG_CRASH_HOTPLUG
	if ((flags & KEXEC_ON_CRASH) && arch_crash_hotplug_support(image, flags))
		image->hotplug_support = 1;
#endif

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, i);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	ret = machine_kexec_post_load(image);
	if (ret)
		goto out;

	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
#ifdef CONFIG_CRASH_DUMP
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();
#endif

	kimage_free(image);
out_unlock:
	kexec_unlock();
	return ret;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you need to do it yourself.
 */

static inline int kexec_load_check(unsigned long nr_segments,
				   unsigned long flags)
{
	int image_type = (flags & KEXEC_ON_CRASH) ?
			 KEXEC_TYPE_CRASH : KEXEC_TYPE_DEFAULT;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!kexec_load_permitted(image_type))
		return -EPERM;

	/* Permit LSMs and IMA to fail the kexec */
	result = security_kernel_load_data(LOADING_KEXEC_IMAGE, false);
	if (result < 0)
		return result;

	/*
	 * kexec can be used to circumvent module loading restrictions, so
	 * prevent loading in that case
	 */
	result = security_locked_down(LOCKDOWN_KEXEC);
	if (result)
		return result;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kexec_segment *ksegments;
	unsigned long result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	ksegments = memdup_array_user(segments, nr_segments,
				      sizeof(ksegments[0]));
	if (IS_ERR(ksegments))
		return PTR_ERR(ksegments);

	result = do_kexec_load(entry, nr_segments, ksegments, flags);
	kfree(ksegments);

	return result;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment *ksegments;
	unsigned long i, result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]),
			GFP_KERNEL);
	if (!ksegments)
		return -ENOMEM;

	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			goto fail;

		ksegments[i].buf   = compat_ptr(in.buf);
		ksegments[i].bufsz = in.bufsz;
		ksegments[i].mem   = in.mem;
		ksegments[i].memsz = in.memsz;
	}

	result = do_kexec_load(entry, nr_segments, ksegments, flags);

fail:
	kfree(ksegments);
	return result;
}
#endif
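
/*
 * Illustrative note (editor's addition, not part of the original file) on
 * the flags sanity check in kexec_load_check():
 *
 *   (flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK)
 *
 * strips the architecture field from flags and compares what remains with
 * the set of known flag bits; any unknown non-arch bit makes the two sides
 * differ and the call fails with -EINVAL. For example (0x100 is not a
 * defined KEXEC_* flag):
 *
 *   flags = KEXEC_ON_CRASH | 0x100
 *   flags & KEXEC_FLAGS      -> KEXEC_ON_CRASH
 *   flags & ~KEXEC_ARCH_MASK -> KEXEC_ON_CRASH | 0x100   => mismatch
 */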
/*
 * include/net/tipc.h: Include file for TIPC message header routines
 *
 * Copyright (c) 2017 Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _TIPC_HDR_H
#define _TIPC_HDR_H

#include <linux/random.h>

#define KEEPALIVE_MSG_MASK 0x0e080000	/* LINK_PROTOCOL + MSG_IS_KEEPALIVE */

struct tipc_basic_hdr {
	__be32 w[4];
};

static inline __be32 tipc_hdr_rps_key(struct tipc_basic_hdr *hdr)
{
	u32 w0 = ntohl(hdr->w[0]);
	bool keepalive_msg = (w0 & KEEPALIVE_MSG_MASK) == KEEPALIVE_MSG_MASK;
	__be32 key;

	/* Return source node identity as key */
	if (likely(!keepalive_msg))
		return hdr->w[3];

	/* Spread PROBE/PROBE_REPLY messages across the cores */
	get_random_bytes(&key, sizeof(key));
	return key;
}

#endif
38 37 38 30 38 38 38 37 35 35 35 38 38 158 160 161 160 25 25 161 165 165 165 161 30 30 38 164 164 165 165 164 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 /* SPDX-License-Identifier: GPL-2.0-only * * Generic bitmap / 8 bpp image bitstreamer for packed pixel framebuffers * * Rewritten by: * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org) * * Based on previous work of: * Copyright (C) June 1999 James Simmons * Anton Vorontsov <avorontsov@ru.mvista.com> * Pavel Pisa <pisa@cmp.felk.cvut.cz> * Antonino A. Daplas <adaplas@gmail.com> * and others * * NOTES: * * Handles native and foreign byte order on both endians, standard and * reverse pixel order in a byte (<8 BPP), word length of 32/64 bits, * bits per pixel from 1 to the word length. Handles line lengths at byte * granularity while maintaining aligned accesses. * * Optimized routines for word aligned 1, 2, 4 pixel per word for high * bpp modes and 4 pixel at a time operation for low bpp. * * The color image is expected to be one byte per pixel, and values should * not exceed the bitdepth or the pseudo palette (if used). 
*/ #include "fb_draw.h" /* bitmap image iterator, one pixel at a time */ struct fb_bitmap_iter { const u8 *data; unsigned long colors[2]; int width, i; }; static bool fb_bitmap_image(void *iterator, unsigned long *pixels, int *bits) { struct fb_bitmap_iter *iter = iterator; if (iter->i < iter->width) { int bit = ~iter->i & (BITS_PER_BYTE-1); int byte = iter->i++ / BITS_PER_BYTE; *pixels = iter->colors[(iter->data[byte] >> bit) & 1]; return true; } iter->data += BITS_TO_BYTES(iter->width); iter->i = 0; return false; } /* color image iterator, one pixel at a time */ struct fb_color_iter { const u8 *data; const u32 *palette; struct fb_reverse reverse; int shift; int width, i; }; static bool fb_color_image(void *iterator, unsigned long *pixels, int *bits) { struct fb_color_iter *iter = iterator; if (iter->i < iter->width) { unsigned long color = iter->data[iter->i++]; if (iter->palette) color = iter->palette[color]; *pixels = color << iter->shift; if (iter->reverse.pixel) *pixels = fb_reverse_bits_long(*pixels); return true; } iter->data += iter->width; iter->i = 0; return false; } /* bitmap image iterator, 4 pixels at a time */ struct fb_bitmap4x_iter { const u8 *data; u32 fgxcolor, bgcolor; int width, i; const u32 *expand; int bpp; bool top; }; static bool fb_bitmap4x_image(void *iterator, unsigned long *pixels, int *bits) { struct fb_bitmap4x_iter *iter = iterator; u8 data; if (iter->i >= BITS_PER_BYTE/2) { iter->i -= BITS_PER_BYTE/2; iter->top = !iter->top; if (iter->top) data = *iter->data++ >> BITS_PER_BYTE/2; else data = iter->data[-1] & ((1 << BITS_PER_BYTE/2)-1); } else if (iter->i != 0) { *bits = iter->bpp * iter->i; if (iter->top) data = iter->data[-1] & ((1 << BITS_PER_BYTE/2)-1); else data = *iter->data++ >> BITS_PER_BYTE/2; #ifndef __LITTLE_ENDIAN data >>= BITS_PER_BYTE/2 - iter->i; #endif iter->i = 0; } else { *bits = iter->bpp * BITS_PER_BYTE/2; iter->i = iter->width; iter->top = false; return false; } *pixels = (iter->fgxcolor & iter->expand[data]) ^ iter->bgcolor; #ifndef __LITTLE_ENDIAN *pixels <<= BITS_PER_LONG - *bits; #endif return true; } /* draw a line a group of pixels at a time */ static __always_inline void fb_bitblit(bool (*get)(void *iter, unsigned long *pixels, int *bits), void *iter, int bits, struct fb_address *dst, struct fb_reverse reverse) { unsigned long pixels, val, mask, old; int offset = 0; int shift = dst->bits; if (shift) { old = fb_read_offset(0, dst); val = fb_reverse_long(old, reverse); val &= ~fb_right(~0UL, shift); } else { old = 0; val = 0; } while (get(iter, &pixels, &bits)) { val |= fb_right(pixels, shift); shift += bits; if (shift < BITS_PER_LONG) continue; val = fb_reverse_long(val, reverse); fb_write_offset(val, offset++, dst); shift &= BITS_PER_LONG - 1; val = !shift ? 
0 : fb_left(pixels, bits - shift); } if (shift) { mask = ~fb_pixel_mask(shift, reverse); val = fb_reverse_long(val, reverse); if (offset || !dst->bits) old = fb_read_offset(offset, dst); fb_write_offset(fb_comp(val, old, mask), offset, dst); } } /* draw a color image a pixel at a time */ static inline void fb_color_imageblit(const struct fb_image *image, struct fb_address *dst, unsigned int bits_per_line, const u32 *palette, int bpp, struct fb_reverse reverse) { struct fb_color_iter iter; u32 height; iter.data = (const u8 *)image->data; iter.palette = palette; iter.reverse = reverse; #ifdef __LITTLE_ENDIAN if (reverse.pixel) iter.shift = BITS_PER_BYTE - bpp; else iter.shift = 0; #else if (reverse.pixel) iter.shift = BITS_PER_LONG - BITS_PER_BYTE; else iter.shift = BITS_PER_LONG - bpp; #endif iter.width = image->width; iter.i = 0; height = image->height; while (height--) { fb_bitblit(fb_color_image, &iter, bpp, dst, reverse); fb_address_forward(dst, bits_per_line); } } #ifdef __LITTLE_ENDIAN #define FB_GEN(a, b) (((a)/8+(((a)&4)<<((b)-2)) \ +(((a)&2)<<((b)*2-1))+(((a)&1u)<<((b)*3)))*((1<<(b))-1)) #define FB_GEN1(a) ((a)/8+((a)&4)/2+((a)&2)*2+((a)&1)*8) #else #define FB_GEN(a, b) (((((a)/8)<<((b)*3))+(((a)&4)<<((b)*2-2)) \ +(((a)&2)<<(b-1))+((a)&1u))*((1<<(b))-1)) #define FB_GEN1(a) (a) #endif #define FB_GENx(a) { FB_GEN(0, (a)), FB_GEN(1, (a)), FB_GEN(2, (a)), FB_GEN(3, (a)), \ FB_GEN(4, (a)), FB_GEN(5, (a)), FB_GEN(6, (a)), FB_GEN(7, (a)), \ FB_GEN(8, (a)), FB_GEN(9, (a)), FB_GEN(10, (a)), FB_GEN(11, (a)), \ FB_GEN(12, (a)), FB_GEN(13, (a)), FB_GEN(14, (a)), FB_GEN(15, (a)) } /* draw a 2 color image four pixels at a time (for 1-8 bpp only) */ static inline void fb_bitmap4x_imageblit(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int bpp, unsigned int bits_per_line, struct fb_reverse reverse) { static const u32 mul[BITS_PER_BYTE] = { 0xf, 0x55, 0x249, 0x1111, 0x8421, 0x41041, 0x204081, 0x01010101 }; static const u32 expand[BITS_PER_BYTE][1 << 4] = { { FB_GEN1(0), FB_GEN1(1), FB_GEN1(2), FB_GEN1(3), FB_GEN1(4), FB_GEN1(5), FB_GEN1(6), FB_GEN1(7), FB_GEN1(8), FB_GEN1(9), FB_GEN1(10), FB_GEN1(11), FB_GEN1(12), FB_GEN1(13), FB_GEN1(14), FB_GEN1(15) }, FB_GENx(2), FB_GENx(3), FB_GENx(4), FB_GENx(5), FB_GENx(6), FB_GENx(7), FB_GENx(8), }; struct fb_bitmap4x_iter iter; u32 height; iter.data = (const u8 *)image->data; if (reverse.pixel) { fgcolor = fb_reverse_bits_long(fgcolor << (BITS_PER_BYTE - bpp)); bgcolor = fb_reverse_bits_long(bgcolor << (BITS_PER_BYTE - bpp)); } iter.fgxcolor = (fgcolor ^ bgcolor) * mul[bpp-1]; iter.bgcolor = bgcolor * mul[bpp-1]; iter.width = image->width; iter.i = image->width; iter.expand = expand[bpp-1]; iter.bpp = bpp; iter.top = false; height = image->height; while (height--) { fb_bitblit(fb_bitmap4x_image, &iter, bpp * BITS_PER_BYTE/2, dst, reverse); fb_address_forward(dst, bits_per_line); } } /* draw a bitmap image 1 pixel at a time (for >8 bpp) */ static inline void fb_bitmap1x_imageblit(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int bpp, unsigned int bits_per_line, struct fb_reverse reverse) { struct fb_bitmap_iter iter; u32 height; iter.colors[0] = bgcolor; iter.colors[1] = fgcolor; #ifndef __LITTLE_ENDIAN iter.colors[0] <<= BITS_PER_LONG - bpp; iter.colors[1] <<= BITS_PER_LONG - bpp; #endif iter.data = (const u8 *)image->data; iter.width = image->width; iter.i = 0; height = image->height; while (height--) { fb_bitblit(fb_bitmap_image, &iter, bpp, dst, 
reverse); fb_address_forward(dst, bits_per_line); } } /* one pixel per word, 64/32 bpp blitting */ static inline void fb_bitmap_1ppw(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int words_per_line, struct fb_reverse reverse) { unsigned long tab[2]; const u8 *src = (u8 *)image->data; int width = image->width; int offset; u32 height; if (reverse.byte) { tab[0] = swab_long(bgcolor); tab[1] = swab_long(fgcolor); } else { tab[0] = bgcolor; tab[1] = fgcolor; } height = image->height; while (height--) { for (offset = 0; offset + 8 <= width; offset += 8) { unsigned int srcbyte = *src++; fb_write_offset(tab[(srcbyte >> 7) & 1], offset + 0, dst); fb_write_offset(tab[(srcbyte >> 6) & 1], offset + 1, dst); fb_write_offset(tab[(srcbyte >> 5) & 1], offset + 2, dst); fb_write_offset(tab[(srcbyte >> 4) & 1], offset + 3, dst); fb_write_offset(tab[(srcbyte >> 3) & 1], offset + 4, dst); fb_write_offset(tab[(srcbyte >> 2) & 1], offset + 5, dst); fb_write_offset(tab[(srcbyte >> 1) & 1], offset + 6, dst); fb_write_offset(tab[(srcbyte >> 0) & 1], offset + 7, dst); } if (offset < width) { unsigned int srcbyte = *src++; while (offset < width) { fb_write_offset(tab[(srcbyte >> 7) & 1], offset, dst); srcbyte <<= 1; offset++; } } fb_address_move_long(dst, words_per_line); } } static inline unsigned long fb_pack(unsigned long left, unsigned long right, int bits) { #ifdef __LITTLE_ENDIAN return left | right << bits; #else return right | left << bits; #endif } /* aligned 32/16 bpp blitting */ static inline void fb_bitmap_2ppw(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int words_per_line, struct fb_reverse reverse) { unsigned long tab[4]; const u8 *src = (u8 *)image->data; int width = image->width / 2; int offset; u32 height; tab[0] = fb_pack(bgcolor, bgcolor, BITS_PER_LONG/2); tab[1] = fb_pack(bgcolor, fgcolor, BITS_PER_LONG/2); tab[2] = fb_pack(fgcolor, bgcolor, BITS_PER_LONG/2); tab[3] = fb_pack(fgcolor, fgcolor, BITS_PER_LONG/2); if (reverse.byte) { tab[0] = swab_long(tab[0]); tab[1] = swab_long(tab[1]); tab[2] = swab_long(tab[2]); tab[3] = swab_long(tab[3]); } height = image->height; while (height--) { for (offset = 0; offset + 4 <= width; offset += 4) { unsigned int srcbyte = *src++; fb_write_offset(tab[(srcbyte >> 6) & 3], offset + 0, dst); fb_write_offset(tab[(srcbyte >> 4) & 3], offset + 1, dst); fb_write_offset(tab[(srcbyte >> 2) & 3], offset + 2, dst); fb_write_offset(tab[(srcbyte >> 0) & 3], offset + 3, dst); } if (offset < width) { unsigned int srcbyte = *src++; while (offset < width) { fb_write_offset(tab[(srcbyte >> 6) & 3], offset, dst); srcbyte <<= 2; offset++; } } fb_address_move_long(dst, words_per_line); } } #define FB_PATP(a, b) (((a)<<((b)*BITS_PER_LONG/4))*((1UL<<BITS_PER_LONG/4)-1UL)) #define FB_PAT4(a) (FB_PATP((a)&1, 0)|FB_PATP(((a)&2)/2, 1)| \ FB_PATP(((a)&4)/4, 2)|FB_PATP(((a)&8)/8, 3)) /* aligned 16/8 bpp blitting */ static inline void fb_bitmap_4ppw(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int words_per_line, struct fb_reverse reverse) { static const unsigned long tab16_be[] = { 0, FB_PAT4(1UL), FB_PAT4(2UL), FB_PAT4(3UL), FB_PAT4(4UL), FB_PAT4(5UL), FB_PAT4(6UL), FB_PAT4(7UL), FB_PAT4(8UL), FB_PAT4(9UL), FB_PAT4(10UL), FB_PAT4(11UL), FB_PAT4(12UL), FB_PAT4(13UL), FB_PAT4(14UL), ~0UL }; static const unsigned long tab16_le[] = { 0, FB_PAT4(8UL), FB_PAT4(4UL), FB_PAT4(12UL), FB_PAT4(2UL), FB_PAT4(10UL), FB_PAT4(6UL), 
FB_PAT4(14UL), FB_PAT4(1UL), FB_PAT4(9UL), FB_PAT4(5UL), FB_PAT4(13UL), FB_PAT4(3UL), FB_PAT4(11UL), FB_PAT4(7UL), ~0UL }; const unsigned long *tab; const u8 *src = (u8 *)image->data; int width = image->width / 4; int offset; u32 height; fgcolor = fgcolor | fgcolor << BITS_PER_LONG/4; bgcolor = bgcolor | bgcolor << BITS_PER_LONG/4; fgcolor = fgcolor | fgcolor << BITS_PER_LONG/2; bgcolor = bgcolor | bgcolor << BITS_PER_LONG/2; fgcolor ^= bgcolor; if (BITS_PER_LONG/4 > BITS_PER_BYTE && reverse.byte) { fgcolor = swab_long(fgcolor); bgcolor = swab_long(bgcolor); } #ifdef __LITTLE_ENDIAN tab = reverse.byte ? tab16_be : tab16_le; #else tab = reverse.byte ? tab16_le : tab16_be; #endif height = image->height; while (height--) { for (offset = 0; offset + 2 <= width; offset += 2, src++) { fb_write_offset((fgcolor & tab[*src >> 4]) ^ bgcolor, offset + 0, dst); fb_write_offset((fgcolor & tab[*src & 0xf]) ^ bgcolor, offset + 1, dst); } if (offset < width) fb_write_offset((fgcolor & tab[*src++ >> 4]) ^ bgcolor, offset, dst); fb_address_move_long(dst, words_per_line); } } static inline void fb_bitmap_imageblit(const struct fb_image *image, struct fb_address *dst, unsigned int bits_per_line, const u32 *palette, int bpp, struct fb_reverse reverse) { unsigned long fgcolor, bgcolor; if (palette) { fgcolor = palette[image->fg_color]; bgcolor = palette[image->bg_color]; } else { fgcolor = image->fg_color; bgcolor = image->bg_color; } if (!dst->bits && !(bits_per_line & (BITS_PER_LONG-1))) { if (bpp == BITS_PER_LONG && BITS_PER_LONG == 32) { fb_bitmap_1ppw(image, dst, fgcolor, bgcolor, bits_per_line / BITS_PER_LONG, reverse); return; } if (bpp == BITS_PER_LONG/2 && !(image->width & 1)) { fb_bitmap_2ppw(image, dst, fgcolor, bgcolor, bits_per_line / BITS_PER_LONG, reverse); return; } if (bpp == BITS_PER_LONG/4 && !(image->width & 3)) { fb_bitmap_4ppw(image, dst, fgcolor, bgcolor, bits_per_line / BITS_PER_LONG, reverse); return; } } if (bpp > 0 && bpp <= BITS_PER_BYTE) fb_bitmap4x_imageblit(image, dst, fgcolor, bgcolor, bpp, bits_per_line, reverse); else if (bpp > BITS_PER_BYTE && bpp <= BITS_PER_LONG) fb_bitmap1x_imageblit(image, dst, fgcolor, bgcolor, bpp, bits_per_line, reverse); } static inline void fb_imageblit(struct fb_info *p, const struct fb_image *image) { int bpp = p->var.bits_per_pixel; unsigned int bits_per_line = BYTES_TO_BITS(p->fix.line_length); struct fb_address dst = fb_address_init(p); struct fb_reverse reverse = fb_reverse_init(p); const u32 *palette = fb_palette(p); fb_address_forward(&dst, image->dy * bits_per_line + image->dx * bpp); if (image->depth == 1) fb_bitmap_imageblit(image, &dst, bits_per_line, palette, bpp, reverse); else fb_color_imageblit(image, &dst, bits_per_line, palette, bpp, reverse); }
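A note on the table-driven path above: fb_bitmap_1ppw() maps every source bit to one full destination word through a two-entry color table, eight pixels per source byte. A minimal userspace sketch of the same expansion, assuming 32-bit pixels and hypothetical buffer names:

#include <stdint.h>

/* Expand one row of a 1-bpp bitmap into 32-bit pixels. Bit 7 of each
 * source byte is the leftmost pixel, matching fb_bitmap_1ppw() above. */
static void blit_row_1bpp(uint32_t *dst, const uint8_t *src,
			  unsigned int width, uint32_t fg, uint32_t bg)
{
	const uint32_t tab[2] = { bg, fg };
	unsigned int x;

	for (x = 0; x < width; x++)
		dst[x] = tab[(src[x / 8] >> (7 - (x % 8))) & 1];
}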
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 2001-2006 Ian Kent <raven@themaw.net> */ #include <linux/sched/signal.h> #include "autofs_i.h" /* We make this a static variable rather than a part of the superblock; it * is better if we don't reassign numbers easily even across filesystems */ static autofs_wqt_t autofs_next_wait_queue = 1; void autofs_catatonic_mode(struct autofs_sb_info *sbi) { struct autofs_wait_queue *wq, *nwq; mutex_lock(&sbi->wq_mutex); if (sbi->flags & AUTOFS_SBI_CATATONIC) { mutex_unlock(&sbi->wq_mutex); return; } pr_debug("entering catatonic mode\n"); sbi->flags |= AUTOFS_SBI_CATATONIC; wq = sbi->queues; sbi->queues = NULL; /* Erase all wait queues */ while (wq) { nwq = wq->next; wq->status = -ENOENT; /* Magic is gone - report failure */ kfree(wq->name.name - wq->offset); wq->name.name = NULL; wake_up(&wq->queue); if (!--wq->wait_ctr) kfree(wq); wq = nwq; } fput(sbi->pipe); /* Close the pipe */ sbi->pipe = NULL; sbi->pipefd = -1; mutex_unlock(&sbi->wq_mutex); } static int autofs_write(struct autofs_sb_info *sbi, struct file *file, const void *addr, int bytes) { unsigned long sigpipe, flags; const char *data = (const char *)addr; ssize_t wr = 0; sigpipe = sigismember(&current->pending.signal, SIGPIPE); mutex_lock(&sbi->pipe_mutex); while (bytes) { wr = __kernel_write(file, data, bytes, NULL); if (wr
<= 0) break; data += wr; bytes -= wr; } mutex_unlock(&sbi->pipe_mutex); /* Keep the currently executing process from receiving a * SIGPIPE unless it was already supposed to get one */ if (wr == -EPIPE && !sigpipe) { spin_lock_irqsave(&current->sighand->siglock, flags); sigdelset(&current->pending.signal, SIGPIPE); recalc_sigpending(); spin_unlock_irqrestore(&current->sighand->siglock, flags); } /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */ return bytes == 0 ? 0 : wr < 0 ? wr : -EIO; } static void autofs_notify_daemon(struct autofs_sb_info *sbi, struct autofs_wait_queue *wq, int type) { union { struct autofs_packet_hdr hdr; union autofs_packet_union v4_pkt; union autofs_v5_packet_union v5_pkt; } pkt; struct file *pipe = NULL; size_t pktsz; int ret; pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); memset(&pkt, 0, sizeof(pkt)); /* For security reasons */ pkt.hdr.proto_version = sbi->version; pkt.hdr.type = type; switch (type) { /* Kernel protocol v4 missing and expire packets */ case autofs_ptype_missing: { struct autofs_packet_missing *mp = &pkt.v4_pkt.missing; pktsz = sizeof(*mp); mp->wait_queue_token = wq->wait_queue_token; mp->len = wq->name.len; memcpy(mp->name, wq->name.name, wq->name.len); mp->name[wq->name.len] = '\0'; break; } case autofs_ptype_expire_multi: { struct autofs_packet_expire_multi *ep = &pkt.v4_pkt.expire_multi; pktsz = sizeof(*ep); ep->wait_queue_token = wq->wait_queue_token; ep->len = wq->name.len; memcpy(ep->name, wq->name.name, wq->name.len); ep->name[wq->name.len] = '\0'; break; } /* * Kernel protocol v5 packet for handling indirect and direct * mount missing and expire requests */ case autofs_ptype_missing_indirect: case autofs_ptype_expire_indirect: case autofs_ptype_missing_direct: case autofs_ptype_expire_direct: { struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet; struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns; pktsz = sizeof(*packet); packet->wait_queue_token = wq->wait_queue_token; packet->len = wq->name.len; memcpy(packet->name, wq->name.name, wq->name.len); packet->name[wq->name.len] = '\0'; packet->dev = wq->dev; packet->ino = wq->ino; packet->uid = from_kuid_munged(user_ns, wq->uid); packet->gid = from_kgid_munged(user_ns, wq->gid); packet->pid = wq->pid; packet->tgid = wq->tgid; break; } default: pr_warn("bad type %d!\n", type); mutex_unlock(&sbi->wq_mutex); return; } pipe = get_file(sbi->pipe); mutex_unlock(&sbi->wq_mutex); switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) { case 0: break; case -ENOMEM: case -ERESTARTSYS: /* Just fail this one */ autofs_wait_release(sbi, wq->wait_queue_token, ret); break; default: autofs_catatonic_mode(sbi); break; } fput(pipe); } static struct autofs_wait_queue * autofs_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr) { struct autofs_wait_queue *wq; for (wq = sbi->queues; wq; wq = wq->next) { if (wq->name.hash == qstr->hash && wq->name.len == qstr->len && wq->name.name && !memcmp(wq->name.name, qstr->name, qstr->len)) break; } return wq; } /* * Check if we have a valid request. * Returns * 1 if the request should continue. * In this case we can return an autofs_wait_queue entry if one is * found or NULL to indicate a new wait needs to be created. * 0 or a negative errno if the request shouldn't continue.
*/ static int validate_request(struct autofs_wait_queue **wait, struct autofs_sb_info *sbi, const struct qstr *qstr, const struct path *path, enum autofs_notify notify) { struct dentry *dentry = path->dentry; struct autofs_wait_queue *wq; struct autofs_info *ino; if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; /* Wait in progress, continue; */ wq = autofs_find_wait(sbi, qstr); if (wq) { *wait = wq; return 1; } *wait = NULL; /* If we don't yet have any info this is a new request */ ino = autofs_dentry_ino(dentry); if (!ino) return 1; /* * If we've been asked to wait on an existing expire (NFY_NONE) * but there is no wait in the queue ... */ if (notify == NFY_NONE) { /* * Either we've beaten the pending expire to post its * wait or it finished while we waited on the mutex. * So we need to wait till either the wait appears * or the expire finishes. */ while (ino->flags & AUTOFS_INF_EXPIRING) { mutex_unlock(&sbi->wq_mutex); schedule_timeout_interruptible(HZ/10); if (mutex_lock_interruptible(&sbi->wq_mutex)) return -EINTR; if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; wq = autofs_find_wait(sbi, qstr); if (wq) { *wait = wq; return 1; } } /* * Not ideal but the status has already gone. Of the two * cases where we wait on NFY_NONE neither depends on the * return status of the wait. */ return 0; } /* * If we've been asked to trigger a mount and the request * completed while we waited on the mutex ... */ if (notify == NFY_MOUNT) { struct dentry *new = NULL; struct path this; int valid = 1; /* * If the dentry was successfully mounted while we slept * on the wait queue mutex we can return success. If it * isn't mounted (doesn't have submounts for the case of * a multi-mount with no mount at its base) we can * continue on and create a new request. */ if (!IS_ROOT(dentry)) { if (d_unhashed(dentry) && d_really_is_positive(dentry)) { struct dentry *parent = dentry->d_parent; new = d_lookup(parent, &dentry->d_name); if (new) dentry = new; } } this.mnt = path->mnt; this.dentry = dentry; if (path_has_submounts(&this)) valid = 0; if (new) dput(new); return valid; } return 1; } int autofs_wait(struct autofs_sb_info *sbi, const struct path *path, enum autofs_notify notify) { struct dentry *dentry = path->dentry; struct autofs_wait_queue *wq; struct qstr qstr; char *name; int status, ret, type; unsigned int offset = 0; pid_t pid; pid_t tgid; /* In catatonic mode, we don't wait for anybody */ if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; /* * Try translating pids to the namespace of the daemon. * * Zero means failure: we are in an unrelated pid namespace. */ pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); if (pid == 0 || tgid == 0) return -ENOENT; if (d_really_is_negative(dentry)) { /* * A wait for a negative dentry is invalid for certain * cases. A direct or offset mount "always" has its mount * point directory created and so the request dentry must * be positive or the map key doesn't exist. The situation * is very similar for indirect mounts except only dentries * in the root of the autofs file system may be negative.
*/ if (autofs_type_trigger(sbi->type)) return -ENOENT; else if (!IS_ROOT(dentry->d_parent)) return -ENOENT; } name = kmalloc(NAME_MAX + 1, GFP_KERNEL); if (!name) return -ENOMEM; /* If this is a direct mount request create a dummy name */ if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type)) { qstr.name = name; qstr.len = sprintf(name, "%p", dentry); } else { char *p = dentry_path_raw(dentry, name, NAME_MAX); if (IS_ERR(p)) { kfree(name); return -ENOENT; } qstr.name = ++p; // skip the leading slash qstr.len = strlen(p); offset = p - name; } qstr.hash = full_name_hash(dentry, qstr.name, qstr.len); if (mutex_lock_interruptible(&sbi->wq_mutex)) { kfree(name); return -EINTR; } ret = validate_request(&wq, sbi, &qstr, path, notify); if (ret <= 0) { if (ret != -EINTR) mutex_unlock(&sbi->wq_mutex); kfree(name); return ret; } if (!wq) { /* Create a new wait queue */ wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL); if (!wq) { kfree(name); mutex_unlock(&sbi->wq_mutex); return -ENOMEM; } wq->wait_queue_token = autofs_next_wait_queue; if (++autofs_next_wait_queue == 0) autofs_next_wait_queue = 1; wq->next = sbi->queues; sbi->queues = wq; init_waitqueue_head(&wq->queue); memcpy(&wq->name, &qstr, sizeof(struct qstr)); wq->offset = offset; wq->dev = autofs_get_dev(sbi); wq->ino = autofs_get_ino(sbi); wq->uid = current_uid(); wq->gid = current_gid(); wq->pid = pid; wq->tgid = tgid; wq->status = -EINTR; /* Status return if interrupted */ wq->wait_ctr = 2; if (sbi->version < 5) { if (notify == NFY_MOUNT) type = autofs_ptype_missing; else type = autofs_ptype_expire_multi; } else { if (notify == NFY_MOUNT) type = autofs_type_trigger(sbi->type) ? autofs_ptype_missing_direct : autofs_ptype_missing_indirect; else type = autofs_type_trigger(sbi->type) ? autofs_ptype_expire_direct : autofs_ptype_expire_indirect; } pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, notify); /* * autofs_notify_daemon() may block; it will unlock ->wq_mutex */ autofs_notify_daemon(sbi, wq, type); } else { wq->wait_ctr++; pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, notify); mutex_unlock(&sbi->wq_mutex); kfree(name); } /* * wq->name.name is NULL iff the lock is already released * or the mount has been made catatonic. */ wait_event_killable(wq->queue, wq->name.name == NULL); status = wq->status; /* * For direct and offset mounts we need to track the requester's * uid and gid in the dentry info struct. This is so it can be * supplied, on request, by the misc device ioctl interface. * This is needed during daemon restart when reconnecting * to existing, active, autofs mounts. The uid and gid (and * related string values) may be used for macro substitution * in autofs mount maps. */ if (!status) { struct autofs_info *ino; struct dentry *de = NULL; /* direct mount or browsable map */ ino = autofs_dentry_ino(dentry); if (!ino) { /* If not, look up the actual dentry used */ de = d_lookup(dentry->d_parent, &dentry->d_name); if (de) ino = autofs_dentry_ino(de); } /* Set mount requester */ if (ino) { spin_lock(&sbi->fs_lock); ino->uid = wq->uid; ino->gid = wq->gid; spin_unlock(&sbi->fs_lock); } if (de) dput(de); } /* Are we the last process to need status?
*/ mutex_lock(&sbi->wq_mutex); if (!--wq->wait_ctr) kfree(wq); mutex_unlock(&sbi->wq_mutex); return status; } int autofs_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status) { struct autofs_wait_queue *wq, **wql; mutex_lock(&sbi->wq_mutex); for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { if (wq->wait_queue_token == wait_queue_token) break; } if (!wq) { mutex_unlock(&sbi->wq_mutex); return -EINVAL; } *wql = wq->next; /* Unlink from chain */ kfree(wq->name.name - wq->offset); wq->name.name = NULL; /* Do not wait on this queue */ wq->status = status; wake_up(&wq->queue); if (!--wq->wait_ctr) kfree(wq); mutex_unlock(&sbi->wq_mutex); return 0; }
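autofs_wait_release() above walks the queue with a pointer-to-pointer (wq, **wql) so that unlinking the head entry needs no special case. A self-contained userspace sketch of that idiom, with a hypothetical node type standing in for autofs_wait_queue:

#include <stdlib.h>

struct node {
	unsigned long token;
	struct node *next;
};

/* Unlink and free the entry with a matching token; returns 0 on success,
 * -1 if nothing matches (autofs_wait_release() returns -EINVAL there). */
static int release(struct node **head, unsigned long token)
{
	struct node **pp, *n;

	for (pp = head; (n = *pp) != NULL; pp = &n->next) {
		if (n->token == token) {
			*pp = n->next;	/* unlink from chain */
			free(n);
			return 0;
		}
	}
	return -1;
}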
// SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/fs.h> #include <linux/path.h> #include <linux/slab.h> #include <linux/fs_struct.h> #include "internal.h" /* * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. * It can block. */ void set_fs_root(struct fs_struct *fs, const struct path *path) { struct path old_root; path_get(path); write_seqlock(&fs->seq); old_root = fs->root; fs->root = *path; write_sequnlock(&fs->seq); if (old_root.dentry) path_put(&old_root); } /* * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values. * It can block. */ void set_fs_pwd(struct fs_struct *fs, const struct path *path) { struct path old_pwd; path_get(path); write_seqlock(&fs->seq); old_pwd = fs->pwd; fs->pwd = *path; write_sequnlock(&fs->seq); if (old_pwd.dentry) path_put(&old_pwd); } static inline int replace_path(struct path *p, const struct path *old, const struct path *new) { if (likely(p->dentry != old->dentry || p->mnt != old->mnt)) return 0; *p = *new; return 1; } void chroot_fs_refs(const struct path *old_root, const struct path *new_root) { struct task_struct *g, *p; struct fs_struct *fs; int count = 0; read_lock(&tasklist_lock); for_each_process_thread(g, p) { task_lock(p); fs = p->fs; if (fs) { int hits = 0; write_seqlock(&fs->seq); hits += replace_path(&fs->root, old_root, new_root); hits += replace_path(&fs->pwd, old_root, new_root); while (hits--) { count++; path_get(new_root); } write_sequnlock(&fs->seq); } task_unlock(p); } read_unlock(&tasklist_lock); while (count--) path_put(old_root); } void free_fs_struct(struct fs_struct *fs) { path_put(&fs->root); path_put(&fs->pwd); kmem_cache_free(fs_cachep, fs); } void exit_fs(struct task_struct *tsk) { struct fs_struct *fs = tsk->fs; if (fs) { int kill; task_lock(tsk); read_seqlock_excl(&fs->seq); tsk->fs = NULL; kill = !--fs->users; read_sequnlock_excl(&fs->seq); task_unlock(tsk); if (kill) free_fs_struct(fs); } } struct fs_struct *copy_fs_struct(struct fs_struct *old) { struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); /* We don't need to lock fs - think why ;-) */ if (fs) { fs->users = 1; fs->in_exec = 0; seqlock_init(&fs->seq); fs->umask = old->umask; read_seqlock_excl(&old->seq); fs->root = old->root; path_get(&fs->root); fs->pwd = old->pwd; path_get(&fs->pwd); read_sequnlock_excl(&old->seq); } return fs; } int unshare_fs_struct(void) { struct fs_struct *fs = current->fs; struct fs_struct *new_fs = copy_fs_struct(fs); int kill; if (!new_fs) return -ENOMEM; task_lock(current); read_seqlock_excl(&fs->seq); kill = !--fs->users; current->fs = new_fs; read_sequnlock_excl(&fs->seq); task_unlock(current); if (kill) free_fs_struct(fs); return 0; } EXPORT_SYMBOL_GPL(unshare_fs_struct); int current_umask(void) { return current->fs->umask; } EXPORT_SYMBOL(current_umask); /* to be mentioned only in INIT_TASK */ struct
fs_struct init_fs = { .users = 1, .seq = __SEQLOCK_UNLOCKED(init_fs.seq), .umask = 0022, };
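The seqlock in fs_struct lets readers snapshot root/pwd without blocking the writers above. A simplified userspace emulation of the read-retry protocol such a lock implements (hypothetical types; it deliberately ignores the memory-ordering and mixed-size-access details the kernel primitive actually handles):

#include <stdatomic.h>

struct pathpair { void *mnt, *dentry; };

struct fsdata {
	atomic_uint seq;	/* even: stable, odd: write in progress */
	struct pathpair root;
};

static void write_root(struct fsdata *fs, struct pathpair p)
{
	atomic_fetch_add(&fs->seq, 1);	/* odd: readers will retry */
	fs->root = p;
	atomic_fetch_add(&fs->seq, 1);	/* even: new value published */
}

static struct pathpair read_root(struct fsdata *fs)
{
	struct pathpair p;
	unsigned int s;

	do {
		while ((s = atomic_load(&fs->seq)) & 1)
			;	/* writer active, spin */
		p = fs->root;
	} while (atomic_load(&fs->seq) != s);	/* raced with writer: retry */
	return p;
}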
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2007-2012 Nicira, Inc. */ #include <linux/netdevice.h> #include <net/genetlink.h> #include <net/netns/generic.h> #include "datapath.h" #include "vport-internal_dev.h" #include "vport-netdev.h" static void dp_detach_port_notify(struct vport *vport) { struct sk_buff *notify; struct datapath *dp; dp = vport->dp; notify = ovs_vport_cmd_build_info(vport, ovs_dp_get_net(dp), 0, 0, OVS_VPORT_CMD_DEL); ovs_dp_detach_port(vport); if (IS_ERR(notify)) { genl_set_err(&dp_vport_genl_family, ovs_dp_get_net(dp), 0, 0, PTR_ERR(notify)); return; } genlmsg_multicast_netns(&dp_vport_genl_family, ovs_dp_get_net(dp), notify, 0, 0, GFP_KERNEL); } void ovs_dp_notify_wq(struct work_struct *work) { struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work); struct datapath *dp; ovs_lock(); list_for_each_entry(dp, &ovs_net->dps, list_node) { int i; for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { struct vport *vport; struct hlist_node *n; hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL) continue; if (!(netif_is_ovs_port(vport->dev))) dp_detach_port_notify(vport); } } } ovs_unlock(); } static int dp_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct ovs_net *ovs_net; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vport *vport = NULL; if (!ovs_is_internal_dev(dev)) vport = ovs_netdev_get_vport(dev); if (!vport) return NOTIFY_DONE; if (event == NETDEV_UNREGISTER) { /* upper_dev_unlink and decrement promisc immediately */ ovs_netdev_detach_dev(vport); /* schedule vport destroy, dev_put and genl notification */ ovs_net = net_generic(dev_net(dev), ovs_net_id); queue_work(system_percpu_wq, &ovs_net->dp_notify_work); } return NOTIFY_DONE; } struct notifier_block ovs_dp_device_notifier = { .notifier_call = dp_device_event };
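The notifier block above only defines the callback; hooking it into the netdev event chain happens elsewhere in the openvswitch initialization. A sketch of the usual registration pairing (the init/cleanup function names here are placeholders, not the actual datapath.c code):

static int __init example_init(void)
{
	int err;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		return err;
	/* ... remaining datapath setup ... */
	return 0;
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
}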
// SPDX-License-Identifier: GPL-2.0-or-later /* Kernel cryptographic api. * cast6.c - Cast6 cipher algorithm [rfc2612]. * * CAST-256 (*cast6*) is a DES-like Substitution-Permutation Network (SPN) * cryptosystem built upon the CAST-128 (*cast5*) [rfc2144] encryption * algorithm. * * Copyright (C) 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>. */ #include <linux/unaligned.h> #include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <crypto/cast6.h> #define s1 cast_s1 #define s2 cast_s2 #define s3 cast_s3 #define s4 cast_s4 #define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) #define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) #define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) static const u32 Tm[24][8] = { { 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d, 0x84c413be, 0xf39dff5f, 0x6277eb00 } , { 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525, 0xfb9370c6, 0x6a6d5c67, 0xd9474808 } , { 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d, 0x7262cdce, 0xe13cb96f, 0x5016a510 } , { 0xbef090b1, 0x2dca7c52, 0x9ca467f3, 0x0b7e5394, 0x7a583f35, 0xe9322ad6, 0x580c1677, 0xc6e60218 } , { 0x35bfedb9, 0xa499d95a, 0x1373c4fb, 0x824db09c, 0xf1279c3d, 0x600187de, 0xcedb737f, 0x3db55f20 } , { 0xac8f4ac1, 0x1b693662, 0x8a432203, 0xf91d0da4, 0x67f6f945, 0xd6d0e4e6, 0x45aad087, 0xb484bc28 } , { 0x235ea7c9, 0x9238936a, 0x01127f0b, 0x6fec6aac, 0xdec6564d, 0x4da041ee, 0xbc7a2d8f, 0x2b541930 } , { 0x9a2e04d1, 0x0907f072, 0x77e1dc13, 0xe6bbc7b4, 0x5595b355, 0xc46f9ef6, 0x33498a97, 0xa2237638 } , { 0x10fd61d9, 0x7fd74d7a, 0xeeb1391b, 0x5d8b24bc, 0xcc65105d, 0x3b3efbfe, 0xaa18e79f, 0x18f2d340 } , { 0x87ccbee1, 0xf6a6aa82, 0x65809623, 0xd45a81c4, 0x43346d65, 0xb20e5906, 0x20e844a7, 0x8fc23048 } , { 0xfe9c1be9, 0x6d76078a, 0xdc4ff32b, 0x4b29decc, 0xba03ca6d, 0x28ddb60e, 0x97b7a1af, 0x06918d50 } , { 0x756b78f1, 0xe4456492, 0x531f5033, 0xc1f93bd4, 0x30d32775, 0x9fad1316, 0x0e86feb7, 0x7d60ea58 } , { 0xec3ad5f9, 0x5b14c19a, 0xc9eead3b, 0x38c898dc, 0xa7a2847d, 0x167c701e, 0x85565bbf, 0xf4304760 } , { 0x630a3301, 0xd1e41ea2, 0x40be0a43, 0xaf97f5e4, 0x1e71e185, 0x8d4bcd26, 0xfc25b8c7, 0x6affa468 } , { 0xd9d99009, 0x48b37baa, 0xb78d674b,
0x266752ec, 0x95413e8d, 0x041b2a2e, 0x72f515cf, 0xe1cf0170 } , { 0x50a8ed11, 0xbf82d8b2, 0x2e5cc453, 0x9d36aff4, 0x0c109b95, 0x7aea8736, 0xe9c472d7, 0x589e5e78 } , { 0xc7784a19, 0x365235ba, 0xa52c215b, 0x14060cfc, 0x82dff89d, 0xf1b9e43e, 0x6093cfdf, 0xcf6dbb80 } , { 0x3e47a721, 0xad2192c2, 0x1bfb7e63, 0x8ad56a04, 0xf9af55a5, 0x68894146, 0xd7632ce7, 0x463d1888 } , { 0xb5170429, 0x23f0efca, 0x92cadb6b, 0x01a4c70c, 0x707eb2ad, 0xdf589e4e, 0x4e3289ef, 0xbd0c7590 } , { 0x2be66131, 0x9ac04cd2, 0x099a3873, 0x78742414, 0xe74e0fb5, 0x5627fb56, 0xc501e6f7, 0x33dbd298 } , { 0xa2b5be39, 0x118fa9da, 0x8069957b, 0xef43811c, 0x5e1d6cbd, 0xccf7585e, 0x3bd143ff, 0xaaab2fa0 } , { 0x19851b41, 0x885f06e2, 0xf738f283, 0x6612de24, 0xd4ecc9c5, 0x43c6b566, 0xb2a0a107, 0x217a8ca8 } , { 0x90547849, 0xff2e63ea, 0x6e084f8b, 0xdce23b2c, 0x4bbc26cd, 0xba96126e, 0x296ffe0f, 0x9849e9b0 } , { 0x0723d551, 0x75fdc0f2, 0xe4d7ac93, 0x53b19834, 0xc28b83d5, 0x31656f76, 0xa03f5b17, 0x0f1946b8 } }; static const u8 Tr[4][8] = { { 0x13, 0x04, 0x15, 0x06, 0x17, 0x08, 0x19, 0x0a } , { 0x1b, 0x0c, 0x1d, 0x0e, 0x1f, 0x10, 0x01, 0x12 } , { 0x03, 0x14, 0x05, 0x16, 0x07, 0x18, 0x09, 0x1a } , { 0x0b, 0x1c, 0x0d, 0x1e, 0x0f, 0x00, 0x11, 0x02 } }; /* forward octave */ static inline void W(u32 *key, unsigned int i) { u32 I; key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]); key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]); key[4] ^= F3(key[5], Tr[i % 4][2], Tm[i][2]); key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]); key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]); key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]); key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]); key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); } int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key, unsigned int key_len) { int i; u32 key[8]; __be32 p_key[8]; /* padded key */ if (key_len % 4 != 0) return -EINVAL; memset(p_key, 0, 32); memcpy(p_key, in_key, key_len); key[0] = be32_to_cpu(p_key[0]); /* A */ key[1] = be32_to_cpu(p_key[1]); /* B */ key[2] = be32_to_cpu(p_key[2]); /* C */ key[3] = be32_to_cpu(p_key[3]); /* D */ key[4] = be32_to_cpu(p_key[4]); /* E */ key[5] = be32_to_cpu(p_key[5]); /* F */ key[6] = be32_to_cpu(p_key[6]); /* G */ key[7] = be32_to_cpu(p_key[7]); /* H */ for (i = 0; i < 12; i++) { W(key, 2 * i); W(key, 2 * i + 1); c->Kr[i][0] = key[0] & 0x1f; c->Kr[i][1] = key[2] & 0x1f; c->Kr[i][2] = key[4] & 0x1f; c->Kr[i][3] = key[6] & 0x1f; c->Km[i][0] = key[7]; c->Km[i][1] = key[5]; c->Km[i][2] = key[3]; c->Km[i][3] = key[1]; } return 0; } EXPORT_SYMBOL_GPL(__cast6_setkey); int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen); } EXPORT_SYMBOL_GPL(cast6_setkey); /*forward quad round*/ static inline void Q(u32 *block, const u8 *Kr, const u32 *Km) { u32 I; block[2] ^= F1(block[3], Kr[0], Km[0]); block[1] ^= F2(block[2], Kr[1], Km[1]); block[0] ^= F3(block[1], Kr[2], Km[2]); block[3] ^= F1(block[0], Kr[3], Km[3]); } /*reverse quad round*/ static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km) { u32 I; block[3] ^= F1(block[0], Kr[3], Km[3]); block[0] ^= F3(block[1], Kr[2], Km[2]); block[1] ^= F2(block[2], Kr[1], Km[1]); block[2] ^= F1(block[3], Kr[0], Km[0]); } void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { const struct cast6_ctx *c = ctx; u32 block[4]; const u32 *Km; const u8 *Kr; block[0] = get_unaligned_be32(inbuf); block[1] = get_unaligned_be32(inbuf + 4); block[2] = get_unaligned_be32(inbuf + 8); block[3] = get_unaligned_be32(inbuf + 12); Km = c->Km[0]; Kr = c->Kr[0]; 
Q(block, Kr, Km); Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km); Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km); Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km); Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km); Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km); Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km); Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km); Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km); Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km); Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km); Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km); put_unaligned_be32(block[0], outbuf); put_unaligned_be32(block[1], outbuf + 4); put_unaligned_be32(block[2], outbuf + 8); put_unaligned_be32(block[3], outbuf + 12); } EXPORT_SYMBOL_GPL(__cast6_encrypt); static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) { const struct cast6_ctx *c = ctx; u32 block[4]; const u32 *Km; const u8 *Kr; block[0] = get_unaligned_be32(inbuf); block[1] = get_unaligned_be32(inbuf + 4); block[2] = get_unaligned_be32(inbuf + 8); block[3] = get_unaligned_be32(inbuf + 12); Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km); Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km); Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km); Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km); Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km); Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km); Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km); Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km); Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km); Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km); Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km); Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km); put_unaligned_be32(block[0], outbuf); put_unaligned_be32(block[1], outbuf + 4); put_unaligned_be32(block[2], outbuf + 8); put_unaligned_be32(block[3], outbuf + 12); } EXPORT_SYMBOL_GPL(__cast6_decrypt); static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { __cast6_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } static struct crypto_alg alg = { .cra_name = "cast6", .cra_driver_name = "cast6-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST6_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast6_ctx), .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAST6_MIN_KEY_SIZE, .cia_max_keysize = CAST6_MAX_KEY_SIZE, .cia_setkey = cast6_setkey, .cia_encrypt = cast6_encrypt, .cia_decrypt = cast6_decrypt} } }; static int __init cast6_mod_init(void) { return crypto_register_alg(&alg); } static void __exit cast6_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(cast6_mod_init); module_exit(cast6_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast6 Cipher Algorithm"); MODULE_ALIAS_CRYPTO("cast6"); MODULE_ALIAS_CRYPTO("cast6-generic");
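Since __cast6_setkey(), __cast6_encrypt() and __cast6_decrypt() are exported for reuse (e.g. by SIMD glue code), a quick encrypt/decrypt round trip is easy to express. A sketch only, using an arbitrary key and block rather than the official RFC 2612 test vectors:

#include <crypto/cast6.h>
#include <linux/errno.h>
#include <linux/string.h>

static int cast6_roundtrip_check(void)
{
	struct cast6_ctx ctx;
	static const u8 key[CAST6_MAX_KEY_SIZE] = { 0x01, 0x23, 0x45, 0x67 };
	const u8 pt[CAST6_BLOCK_SIZE] = "sixteen byte pt";	/* 16 bytes incl. NUL */
	u8 ct[CAST6_BLOCK_SIZE], out[CAST6_BLOCK_SIZE];
	int err;

	err = __cast6_setkey(&ctx, key, CAST6_MAX_KEY_SIZE);	/* 256-bit key */
	if (err)
		return err;
	__cast6_encrypt(&ctx, ct, pt);
	__cast6_decrypt(&ctx, out, ct);
	return memcmp(out, pt, CAST6_BLOCK_SIZE) ? -EINVAL : 0;
}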
// SPDX-License-Identifier: GPL-2.0 /* * Supplementary group IDs */ #include <linux/cred.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/sort.h> #include <linux/syscalls.h> #include <linux/user_namespace.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> struct group_info *groups_alloc(int gidsetsize) { struct group_info *gi; gi = kvmalloc(struct_size(gi, gid, gidsetsize), GFP_KERNEL_ACCOUNT); if (!gi) return NULL; refcount_set(&gi->usage, 1); gi->ngroups = gidsetsize; return gi; } EXPORT_SYMBOL(groups_alloc); void groups_free(struct group_info *group_info) { kvfree(group_info); } EXPORT_SYMBOL(groups_free); /* export the group_info to a user-space array */ static int groups_to_user(gid_t __user *grouplist, const struct group_info *group_info) { struct user_namespace *user_ns = current_user_ns(); int i; unsigned int count = group_info->ngroups; for (i = 0; i < count; i++) { gid_t gid; gid = from_kgid_munged(user_ns, group_info->gid[i]); if (put_user(gid, grouplist+i)) return -EFAULT; } return 0; } /* fill a group_info from a user-space array - it must be allocated already */ static int groups_from_user(struct group_info *group_info, gid_t __user *grouplist) { struct user_namespace *user_ns = current_user_ns(); int i; unsigned int count = group_info->ngroups; for (i = 0; i < count; i++) { gid_t gid; kgid_t kgid; if (get_user(gid, grouplist+i)) return -EFAULT; kgid = make_kgid(user_ns, gid); if (!gid_valid(kgid)) return -EINVAL; group_info->gid[i] = kgid; } return 0; } static int gid_cmp(const void *_a, const void *_b) { kgid_t a = *(kgid_t *)_a; kgid_t b = *(kgid_t *)_b; return gid_gt(a, b) - gid_lt(a, b); } void groups_sort(struct group_info *group_info) { sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid), gid_cmp, NULL); } EXPORT_SYMBOL(groups_sort); /* a simple bsearch */ int groups_search(const struct group_info *group_info, kgid_t grp) { unsigned int left, right; if (!group_info) return 0; left = 0; right = group_info->ngroups; while (left < right) { unsigned int mid = (left+right)/2; if (gid_gt(grp, group_info->gid[mid])) left = mid + 1; else if (gid_lt(grp, group_info->gid[mid])) right = mid; else return 1; } return 0; } /** * set_groups - Change a group subscription in a set of credentials * @new: The newly prepared set of credentials to alter * @group_info: The group list to install */ void set_groups(struct cred *new, struct group_info *group_info) { put_group_info(new->group_info); get_group_info(group_info);
new->group_info = group_info; } EXPORT_SYMBOL(set_groups); /** * set_current_groups - Change current's group subscription * @group_info: The group list to impose * * Validate a group subscription and, if valid, impose it upon current's task * security record. */ int set_current_groups(struct group_info *group_info) { struct cred *new; const struct cred *old; int retval; new = prepare_creds(); if (!new) return -ENOMEM; old = current_cred(); set_groups(new, group_info); retval = security_task_fix_setgroups(new, old); if (retval < 0) goto error; return commit_creds(new); error: abort_creds(new); return retval; } EXPORT_SYMBOL(set_current_groups); SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist) { const struct cred *cred = current_cred(); int i; if (gidsetsize < 0) return -EINVAL; /* no need to grab task_lock here; it cannot change */ i = cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } if (groups_to_user(grouplist, cred->group_info)) { i = -EFAULT; goto out; } } out: return i; } bool may_setgroups(void) { struct user_namespace *user_ns = current_user_ns(); return ns_capable_setid(user_ns, CAP_SETGID) && userns_may_setgroups(user_ns); } /* * SMP: Our groups are copy-on-write. We can set them safely * without another task interfering. */ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) { struct group_info *group_info; int retval; if (!may_setgroups()) return -EPERM; if ((unsigned)gidsetsize > NGROUPS_MAX) return -EINVAL; group_info = groups_alloc(gidsetsize); if (!group_info) return -ENOMEM; retval = groups_from_user(group_info, grouplist); if (retval) { put_group_info(group_info); return retval; } groups_sort(group_info); retval = set_current_groups(group_info); put_group_info(group_info); return retval; } /* * Check whether we're fsgid/egid or in the supplemental group.. */ int in_group_p(kgid_t grp) { const struct cred *cred = current_cred(); int retval = 1; if (!gid_eq(grp, cred->fsgid)) retval = groups_search(cred->group_info, grp); return retval; } EXPORT_SYMBOL(in_group_p); int in_egroup_p(kgid_t grp) { const struct cred *cred = current_cred(); int retval = 1; if (!gid_eq(grp, cred->egid)) retval = groups_search(cred->group_info, grp); return retval; } EXPORT_SYMBOL(in_egroup_p);
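groups_search() above relies on the gid array being sorted, which is why the setgroups() path calls groups_sort() before set_current_groups(). The same sort-then-binary-search contract in plain userspace C, with int gids standing in for kgid_t:

#include <stdlib.h>

static int int_cmp(const void *_a, const void *_b)
{
	int a = *(const int *)_a, b = *(const int *)_b;

	return (a > b) - (a < b);	/* same trick as gid_cmp() */
}

/* Mirrors groups_search(): callers must qsort() the array first,
 * e.g. qsort(gids, ngroups, sizeof(*gids), int_cmp). */
static int in_groups(const int *gids, unsigned int ngroups, int grp)
{
	unsigned int left = 0, right = ngroups;

	while (left < right) {
		unsigned int mid = (left + right) / 2;

		if (grp > gids[mid])
			left = mid + 1;
		else if (grp < gids[mid])
			right = mid;
		else
			return 1;
	}
	return 0;
}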
// SPDX-License-Identifier: GPL-2.0-or-later /****************************************************************************** * vlanproc.c VLAN Module. /proc filesystem interface. * * This module is completely hardware-independent and provides * access to the router using Linux /proc filesystem. * * Author: Ben Greear, <greearb@candelatech.com> copied from wanproc.c * by: Gene Kozin <genek@compuserve.com> * * Copyright: (c) 1998 Ben Greear * * ============================================================================ * Jan 20, 1998 Ben Greear Initial Version *****************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "vlanproc.h" #include "vlan.h" /****** Function Prototypes *************************************************/ /* Methods for preparing data for reading proc entries */ static int vlan_seq_show(struct seq_file *seq, void *v); static void *vlan_seq_start(struct seq_file *seq, loff_t *pos); static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos); static void vlan_seq_stop(struct seq_file *seq, void *); static int vlandev_seq_show(struct seq_file *seq, void *v); /* * Global Data */ /* * Names of the proc directory entries */ static const char name_root[] = "vlan"; static const char name_conf[] = "config"; /* * Structures for interfacing with the /proc filesystem. * VLAN creates its own directory /proc/net/vlan with the following * entries: * config device status/configuration * <device> entry for each device */ /* * Generic /proc/net/vlan/<file> file and inode operations */ static const struct seq_operations vlan_seq_ops = { .start = vlan_seq_start, .next = vlan_seq_next, .stop = vlan_seq_stop, .show = vlan_seq_show, }; /* * Proc filesystem directory entries.
*/ /* Strings */ static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = { [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID", [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID", }; /* * Interface functions */ /* * Clean up /proc/net/vlan entries */ void vlan_proc_cleanup(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); if (vn->proc_vlan_conf) remove_proc_entry(name_conf, vn->proc_vlan_dir); if (vn->proc_vlan_dir) remove_proc_entry(name_root, net->proc_net); /* Dynamically added entries should be cleaned up as their vlan_device * is removed, so we should not have to take care of it here... */ } /* * Create /proc/net/vlan entries */ int __net_init vlan_proc_init(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net); if (!vn->proc_vlan_dir) goto err; vn->proc_vlan_conf = proc_create_net(name_conf, S_IFREG | 0600, vn->proc_vlan_dir, &vlan_seq_ops, sizeof(struct seq_net_private)); if (!vn->proc_vlan_conf) goto err; return 0; err: pr_err("can't create entry in proc filesystem!\n"); vlan_proc_cleanup(net); return -ENOBUFS; } /* * Add directory entry for VLAN device. */ int vlan_proc_add_dev(struct net_device *vlandev) { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); if (!strcmp(vlandev->name, name_conf)) return -EINVAL; vlan->dent = proc_create_single_data(vlandev->name, S_IFREG | 0600, vn->proc_vlan_dir, vlandev_seq_show, vlandev); if (!vlan->dent) return -ENOBUFS; return 0; } /* * Delete directory entry for VLAN device. */ void vlan_proc_rem_dev(struct net_device *vlandev) { /** NOTE: This will consume the memory pointed to by dent, it seems. */ proc_remove(vlan_dev_priv(vlandev)->dent); vlan_dev_priv(vlandev)->dent = NULL; } /****** Proc filesystem entry points ****************************************/ /* * The following few functions build the content of /proc/net/vlan/config */ static void *vlan_seq_from_index(struct seq_file *seq, loff_t *pos) { unsigned long ifindex = *pos; struct net_device *dev; for_each_netdev_dump(seq_file_net(seq), dev, ifindex) { if (!is_vlan_dev(dev)) continue; *pos = dev->ifindex; return dev; } return NULL; } static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; return vlan_seq_from_index(seq, pos); } static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return vlan_seq_from_index(seq, pos); } static void vlan_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { rcu_read_unlock(); } static int vlan_seq_show(struct seq_file *seq, void *v) { struct net *net = seq_file_net(seq); struct vlan_net *vn = net_generic(net, vlan_net_id); if (v == SEQ_START_TOKEN) { const char *nmtype = NULL; seq_puts(seq, "VLAN Dev name | VLAN ID\n"); if (vn->name_type < ARRAY_SIZE(vlan_name_type_str)) nmtype = vlan_name_type_str[vn->name_type]; seq_printf(seq, "Name-Type: %s\n", nmtype ? 
nmtype : "UNKNOWN"); } else { const struct net_device *vlandev = v; const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); seq_printf(seq, "%-15s| %d | %s\n", vlandev->name, vlan->vlan_id, vlan->real_dev->name); } return 0; } static int vlandev_seq_show(struct seq_file *seq, void *offset) { struct net_device *vlandev = (struct net_device *) seq->private; const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats; static const char fmt64[] = "%30s %12llu\n"; int i; if (!is_vlan_dev(vlandev)) return 0; stats = dev_get_stats(vlandev, &temp); seq_printf(seq, "%s VID: %d REORDER_HDR: %i dev->priv_flags: %x\n", vlandev->name, vlan->vlan_id, (int)(vlan->flags & 1), (u32)vlandev->priv_flags); seq_printf(seq, fmt64, "total frames received", stats->rx_packets); seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast); seq_puts(seq, "\n"); seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); seq_printf(seq, "Device: %s", vlan->real_dev->name); /* now show all PRIORITY mappings relating to this VLAN */ seq_printf(seq, "\nINGRESS priority mappings: " "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n", vlan->ingress_priority_map[0], vlan->ingress_priority_map[1], vlan->ingress_priority_map[2], vlan->ingress_priority_map[3], vlan->ingress_priority_map[4], vlan->ingress_priority_map[5], vlan->ingress_priority_map[6], vlan->ingress_priority_map[7]); seq_printf(seq, " EGRESS priority mappings: "); for (i = 0; i < 16; i++) { const struct vlan_priority_tci_mapping *mp = vlan->egress_priority_map[i]; while (mp) { seq_printf(seq, "%u:%d ", mp->priority, ((mp->vlan_qos >> 13) & 0x7)); mp = mp->next; } } seq_puts(seq, "\n"); return 0; }
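Putting the seq_printf() formats above together, /proc/net/vlan/config holds a banner line, a Name-Type line, then one row per VLAN device. A small userspace reader; the sample rows in the comment use made-up device names:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/vlan/config", "r");

	if (!f)
		return 1;
	/* Typical contents, per vlan_seq_show():
	 *   VLAN Dev name | VLAN ID
	 *   Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID
	 *   eth0.100       | 100 | eth0
	 */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}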
/* SPDX-License-Identifier: GPL-2.0-only */ /* * fence-chain: chain fences together in a timeline * * Copyright (C) 2018 Advanced Micro Devices, Inc. * Authors: * Christian König <christian.koenig@amd.com> */ #ifndef __LINUX_DMA_FENCE_CHAIN_H #define __LINUX_DMA_FENCE_CHAIN_H #include <linux/dma-fence.h> #include <linux/irq_work.h> #include <linux/slab.h> /** * struct dma_fence_chain - fence to represent a node of a fence chain * @base: fence base class * @prev: previous fence of the chain * @prev_seqno: original previous seqno before garbage collection * @fence: encapsulated fence * @lock: spinlock for fence handling */ struct dma_fence_chain { struct dma_fence base; struct dma_fence __rcu *prev; u64 prev_seqno; struct dma_fence *fence; union { /** * @cb: callback for signaling * * This is used to add the callback for signaling the * completion of the fence chain. Never used at the same time * as the irq work. */ struct dma_fence_cb cb; /** * @work: irq work item for signaling * * Irq work structure to allow us to add the callback without * running into lock inversion. Never used at the same time as * the callback. */ struct irq_work work; }; spinlock_t lock; }; /** * to_dma_fence_chain - cast a fence to a dma_fence_chain * @fence: fence to cast to a dma_fence_chain * * Returns NULL if the fence is not a dma_fence_chain, * or the dma_fence_chain otherwise. */ static inline struct dma_fence_chain * to_dma_fence_chain(struct dma_fence *fence) { if (!fence || !dma_fence_is_chain(fence)) return NULL; return container_of(fence, struct dma_fence_chain, base); } /** * dma_fence_chain_contained - return the contained fence * @fence: the fence to test * * If the fence is a dma_fence_chain the function returns the fence contained * inside the chain object, otherwise it returns the fence itself. */ static inline struct dma_fence * dma_fence_chain_contained(struct dma_fence *fence) { struct dma_fence_chain *chain = to_dma_fence_chain(fence); return chain ? chain->fence : fence; } /** * dma_fence_chain_alloc * * Returns a new struct dma_fence_chain object or NULL on failure. * * This specialized allocator has to be a macro for its allocations to be * accounted separately (to have a separate alloc_tag). The typecast is * intentional to enforce typesafety. */ #define dma_fence_chain_alloc() \ ((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL)) /** * dma_fence_chain_free * @chain: chain node to free * * Frees up an allocated but not used struct dma_fence_chain object. This * doesn't need an RCU grace period since the fence was never initialized nor * published. After dma_fence_chain_init() has been called the fence must be * released by calling dma_fence_put(), and not through this function. */ static inline void dma_fence_chain_free(struct dma_fence_chain *chain) { kfree(chain); }; /** * dma_fence_chain_for_each - iterate over all fences in chain * @iter: current fence * @head: starting point * * Iterate over all fences in the chain. We keep a reference to the current * fence while inside the loop which must be dropped when breaking out.
* * For a deep dive iterator see dma_fence_unwrap_for_each(). */ #define dma_fence_chain_for_each(iter, head) \ for (iter = dma_fence_get(head); iter; \ iter = dma_fence_chain_walk(iter)) struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence); int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno); void dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev, struct dma_fence *fence, uint64_t seqno); #endif /* __LINUX_DMA_FENCE_CHAIN_H */
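As the kerneldoc warns, dma_fence_chain_for_each holds a reference on the iterator, so any early exit must drop it. A sketch of a walker built on the helpers above (the head fence is assumed to come from caller context):

static bool chain_all_signaled(struct dma_fence *head)
{
	struct dma_fence *iter;

	dma_fence_chain_for_each(iter, head) {
		/* examine the fence wrapped by this chain node, if any */
		if (!dma_fence_is_signaled(dma_fence_chain_contained(iter))) {
			dma_fence_put(iter);	/* drop ref on early break */
			return false;
		}
	}
	return true;
}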
1515 1516 1517 1518 1519 1520 // SPDX-License-Identifier: GPL-2.0 /* * cfg80211 - wext compat code * * This is temporary code until all wireless functionality is migrated * into cfg80211, when that happens all the exports here go away and * we directly assign the wireless handlers of wireless interfaces. * * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2019-2023 Intel Corporation */ #include <linux/export.h> #include <linux/wireless.h> #include <linux/nl80211.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <net/iw_handler.h> #include <net/cfg80211.h> #include <net/cfg80211-wext.h> #include "wext-compat.h" #include "core.h" #include "rdev-ops.h" int cfg80211_wext_giwname(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { strcpy(wrqu->name, "IEEE 802.11"); return 0; } int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { __u32 *mode = &wrqu->mode; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; struct vif_params vifparams; enum nl80211_iftype type; rdev = wiphy_to_rdev(wdev->wiphy); switch (*mode) { case IW_MODE_INFRA: type = NL80211_IFTYPE_STATION; break; case IW_MODE_ADHOC: type = NL80211_IFTYPE_ADHOC; break; case IW_MODE_MONITOR: type = NL80211_IFTYPE_MONITOR; break; default: return -EINVAL; } if (type == wdev->iftype) return 0; memset(&vifparams, 0, sizeof(vifparams)); guard(wiphy)(wdev->wiphy); return cfg80211_change_iface(rdev, dev, type, &vifparams); } int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { __u32 *mode = &wrqu->mode; struct wireless_dev *wdev = dev->ieee80211_ptr; if (!wdev) return -EOPNOTSUPP; switch (wdev->iftype) { case NL80211_IFTYPE_AP: *mode = IW_MODE_MASTER; break; case NL80211_IFTYPE_STATION: *mode = IW_MODE_INFRA; break; case NL80211_IFTYPE_ADHOC: *mode = IW_MODE_ADHOC; break; case NL80211_IFTYPE_MONITOR: *mode = IW_MODE_MONITOR; break; case NL80211_IFTYPE_WDS: *mode = IW_MODE_REPEAT; break; case NL80211_IFTYPE_AP_VLAN: *mode = IW_MODE_SECOND; /* FIXME */ break; default: *mode = IW_MODE_AUTO; break; } return 0; } int cfg80211_wext_giwrange(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; struct wireless_dev *wdev = dev->ieee80211_ptr; struct iw_range *range = (struct iw_range *) extra; enum nl80211_band band; int i, c = 0; if (!wdev) return -EOPNOTSUPP; data->length = sizeof(struct iw_range); memset(range, 0, sizeof(struct iw_range)); range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 21; range->retry_capa = IW_RETRY_LIMIT; range->retry_flags = IW_RETRY_LIMIT; range->min_retry = 0; range->max_retry = 255; range->min_rts = 0; range->max_rts = 2347; range->min_frag = 256; range->max_frag = 2346; range->max_encoding_tokens = 4; range->max_qual.updated = IW_QUAL_NOISE_INVALID; switch (wdev->wiphy->signal_type) { case CFG80211_SIGNAL_TYPE_NONE: break; case CFG80211_SIGNAL_TYPE_MBM: range->max_qual.level = (u8)-110; range->max_qual.qual = 70; range->avg_qual.qual = 35; range->max_qual.updated |= IW_QUAL_DBM; range->max_qual.updated |= IW_QUAL_QUAL_UPDATED; range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED; break; case CFG80211_SIGNAL_TYPE_UNSPEC: range->max_qual.level = 100; range->max_qual.qual = 100; range->avg_qual.qual = 50; range->max_qual.updated |= IW_QUAL_QUAL_UPDATED; 
range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED; break; } range->avg_qual.level = range->max_qual.level / 2; range->avg_qual.noise = range->max_qual.noise / 2; range->avg_qual.updated = range->max_qual.updated; for (i = 0; i < wdev->wiphy->n_cipher_suites; i++) { switch (wdev->wiphy->cipher_suites[i]) { case WLAN_CIPHER_SUITE_TKIP: range->enc_capa |= (IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_WPA); break; case WLAN_CIPHER_SUITE_CCMP: range->enc_capa |= (IW_ENC_CAPA_CIPHER_CCMP | IW_ENC_CAPA_WPA2); break; case WLAN_CIPHER_SUITE_WEP40: range->encoding_size[range->num_encoding_sizes++] = WLAN_KEY_LEN_WEP40; break; case WLAN_CIPHER_SUITE_WEP104: range->encoding_size[range->num_encoding_sizes++] = WLAN_KEY_LEN_WEP104; break; } } for (band = 0; band < NUM_NL80211_BANDS; band ++) { struct ieee80211_supported_band *sband; sband = wdev->wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels && c < IW_MAX_FREQUENCIES; i++) { struct ieee80211_channel *chan = &sband->channels[i]; if (!(chan->flags & IEEE80211_CHAN_DISABLED)) { range->freq[c].i = ieee80211_frequency_to_channel( chan->center_freq); range->freq[c].m = chan->center_freq; range->freq[c].e = 6; c++; } } } range->num_channels = c; range->num_frequency = c; IW_EVENT_CAPA_SET_KERNEL(range->event_capa); IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); if (wdev->wiphy->max_scan_ssids > 0) range->scan_capa |= IW_SCAN_CAPA_ESSID; return 0; } /** * cfg80211_wext_freq - get wext frequency for non-"auto" * @freq: the wext freq encoding * * Returns: a frequency, or a negative error code, or 0 for auto. */ int cfg80211_wext_freq(struct iw_freq *freq) { /* * Parse frequency - return 0 for auto and * -EINVAL for impossible things. */ if (freq->e == 0) { enum nl80211_band band = NL80211_BAND_2GHZ; if (freq->m < 0) return 0; if (freq->m > 14) band = NL80211_BAND_5GHZ; return ieee80211_channel_to_frequency(freq->m, band); } else { int i, div = 1000000; for (i = 0; i < freq->e; i++) div /= 10; if (div <= 0) return -EINVAL; return freq->m / div; } } int cfg80211_wext_siwrts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rts = &wrqu->rts; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u32 orts = wdev->wiphy->rts_threshold; int err; guard(wiphy)(&rdev->wiphy); if (rts->disabled || !rts->fixed) wdev->wiphy->rts_threshold = (u32) -1; else if (rts->value < 0) return -EINVAL; else wdev->wiphy->rts_threshold = rts->value; err = rdev_set_wiphy_params(rdev, -1, WIPHY_PARAM_RTS_THRESHOLD); if (err) wdev->wiphy->rts_threshold = orts; return err; } int cfg80211_wext_giwrts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rts = &wrqu->rts; struct wireless_dev *wdev = dev->ieee80211_ptr; rts->value = wdev->wiphy->rts_threshold; rts->disabled = rts->value == (u32) -1; rts->fixed = 1; return 0; } int cfg80211_wext_siwfrag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *frag = &wrqu->frag; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u32 ofrag = wdev->wiphy->frag_threshold; int err; guard(wiphy)(&rdev->wiphy); if (frag->disabled || !frag->fixed) { wdev->wiphy->frag_threshold = (u32) -1; } else if (frag->value < 256) { return -EINVAL; } else { /* Fragment length must be even, so strip 
LSB. */ wdev->wiphy->frag_threshold = frag->value & ~0x1; } err = rdev_set_wiphy_params(rdev, -1, WIPHY_PARAM_FRAG_THRESHOLD); if (err) wdev->wiphy->frag_threshold = ofrag; return err; } int cfg80211_wext_giwfrag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *frag = &wrqu->frag; struct wireless_dev *wdev = dev->ieee80211_ptr; frag->value = wdev->wiphy->frag_threshold; frag->disabled = frag->value == (u32) -1; frag->fixed = 1; return 0; } static int cfg80211_wext_siwretry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *retry = &wrqu->retry; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u32 changed = 0; u8 olong = wdev->wiphy->retry_long; u8 oshort = wdev->wiphy->retry_short; int err; if (retry->disabled || retry->value < 1 || retry->value > 255 || (retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT) return -EINVAL; guard(wiphy)(&rdev->wiphy); if (retry->flags & IW_RETRY_LONG) { wdev->wiphy->retry_long = retry->value; changed |= WIPHY_PARAM_RETRY_LONG; } else if (retry->flags & IW_RETRY_SHORT) { wdev->wiphy->retry_short = retry->value; changed |= WIPHY_PARAM_RETRY_SHORT; } else { wdev->wiphy->retry_short = retry->value; wdev->wiphy->retry_long = retry->value; changed |= WIPHY_PARAM_RETRY_LONG; changed |= WIPHY_PARAM_RETRY_SHORT; } err = rdev_set_wiphy_params(rdev, -1, changed); if (err) { wdev->wiphy->retry_short = oshort; wdev->wiphy->retry_long = olong; } return err; } int cfg80211_wext_giwretry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *retry = &wrqu->retry; struct wireless_dev *wdev = dev->ieee80211_ptr; retry->disabled = 0; if (retry->flags == 0 || (retry->flags & IW_RETRY_SHORT)) { /* * First return short value, iwconfig will ask long value * later if needed */ retry->flags |= IW_RETRY_LIMIT | IW_RETRY_SHORT; retry->value = wdev->wiphy->retry_short; if (wdev->wiphy->retry_long == wdev->wiphy->retry_short) retry->flags |= IW_RETRY_LONG; return 0; } if (retry->flags & IW_RETRY_LONG) { retry->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; retry->value = wdev->wiphy->retry_long; } return 0; } static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, struct net_device *dev, bool pairwise, const u8 *addr, bool remove, bool tx_key, int idx, struct key_params *params) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err, i; bool rejoin = false; if (wdev->valid_links) return -EINVAL; if (pairwise && !addr) return -EINVAL; /* * In many cases we won't actually need this, but it's better * to do it first in case the allocation fails. Don't use wext. */ if (!wdev->wext.keys) { wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys), GFP_KERNEL); if (!wdev->wext.keys) return -ENOMEM; for (i = 0; i < 4; i++) wdev->wext.keys->params[i].key = wdev->wext.keys->data[i]; } if (wdev->iftype != NL80211_IFTYPE_ADHOC && wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { if (!wdev->connected) return -ENOLINK; if (!rdev->ops->set_default_mgmt_key) return -EOPNOTSUPP; if (idx < 4 || idx > 5) return -EINVAL; } else if (idx < 0 || idx > 3) return -EINVAL; if (remove) { err = 0; if (wdev->connected || (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->u.ibss.current_bss)) { /* * If removing the current TX key, we will need to * join a new IBSS without the privacy bit clear. 
*/ if (idx == wdev->wext.default_key && wdev->iftype == NL80211_IFTYPE_ADHOC) { cfg80211_leave_ibss(rdev, wdev->netdev, true); rejoin = true; } if (!pairwise && addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) err = -ENOENT; else err = rdev_del_key(rdev, dev, -1, idx, pairwise, addr); } wdev->wext.connect.privacy = false; /* * Applications using wireless extensions expect to be * able to delete keys that don't exist, so allow that. */ if (err == -ENOENT) err = 0; if (!err) { if (!addr && idx < 4) { memset(wdev->wext.keys->data[idx], 0, sizeof(wdev->wext.keys->data[idx])); wdev->wext.keys->params[idx].key_len = 0; wdev->wext.keys->params[idx].cipher = 0; } if (idx == wdev->wext.default_key) wdev->wext.default_key = -1; else if (idx == wdev->wext.default_mgmt_key) wdev->wext.default_mgmt_key = -1; } if (!err && rejoin) err = cfg80211_ibss_wext_join(rdev, wdev); return err; } if (addr) tx_key = false; if (cfg80211_validate_key_settings(rdev, params, idx, pairwise, addr)) return -EINVAL; err = 0; if (wdev->connected || (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->u.ibss.current_bss)) err = rdev_add_key(rdev, dev, -1, idx, pairwise, addr, params); else if (params->cipher != WLAN_CIPHER_SUITE_WEP40 && params->cipher != WLAN_CIPHER_SUITE_WEP104) return -EINVAL; if (err) return err; /* * We only need to store WEP keys, since they're the only keys that * can be set before a connection is established and persist after * disconnecting. */ if (!addr && (params->cipher == WLAN_CIPHER_SUITE_WEP40 || params->cipher == WLAN_CIPHER_SUITE_WEP104)) { wdev->wext.keys->params[idx] = *params; memcpy(wdev->wext.keys->data[idx], params->key, params->key_len); wdev->wext.keys->params[idx].key = wdev->wext.keys->data[idx]; } if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 || params->cipher == WLAN_CIPHER_SUITE_WEP104) && (tx_key || (!addr && wdev->wext.default_key == -1))) { if (wdev->connected || (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->u.ibss.current_bss)) { /* * If we are getting a new TX key from not having * had one before we need to join a new IBSS with * the privacy bit set. 
*/ if (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->wext.default_key == -1) { cfg80211_leave_ibss(rdev, wdev->netdev, true); rejoin = true; } err = rdev_set_default_key(rdev, dev, -1, idx, true, true); } if (!err) { wdev->wext.default_key = idx; if (rejoin) err = cfg80211_ibss_wext_join(rdev, wdev); } return err; } if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC && (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) { if (wdev->connected || (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->u.ibss.current_bss)) err = rdev_set_default_mgmt_key(rdev, dev, -1, idx); if (!err) wdev->wext.default_mgmt_key = idx; return err; } return 0; } static int cfg80211_wext_siwencode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { struct iw_point *erq = &wrqu->encoding; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct key_params params; bool remove = false; int idx; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; /* no use -- only MFP (set_default_mgmt_key) is optional */ if (!rdev->ops->del_key || !rdev->ops->add_key || !rdev->ops->set_default_key) return -EOPNOTSUPP; guard(wiphy)(&rdev->wiphy); if (wdev->valid_links) return -EOPNOTSUPP; idx = erq->flags & IW_ENCODE_INDEX; if (idx == 0) { idx = wdev->wext.default_key; if (idx < 0) idx = 0; } else if (idx < 1 || idx > 4) { return -EINVAL; } else { idx--; } if (erq->flags & IW_ENCODE_DISABLED) remove = true; else if (erq->length == 0) { /* No key data - just set the default TX key index */ int err = 0; if (wdev->connected || (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->u.ibss.current_bss)) err = rdev_set_default_key(rdev, dev, -1, idx, true, true); if (!err) wdev->wext.default_key = idx; return err; } memset(&params, 0, sizeof(params)); params.key = keybuf; params.key_len = erq->length; if (erq->length == 5) params.cipher = WLAN_CIPHER_SUITE_WEP40; else if (erq->length == 13) params.cipher = WLAN_CIPHER_SUITE_WEP104; else if (!remove) return -EINVAL; return cfg80211_set_encryption(rdev, dev, false, NULL, remove, wdev->wext.default_key == -1, idx, &params); } static int cfg80211_wext_siwencodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *erq = &wrqu->encoding; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; const u8 *addr; int idx; bool remove = false; struct key_params params; u32 cipher; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; /* no use -- only MFP (set_default_mgmt_key) is optional */ if (!rdev->ops->del_key || !rdev->ops->add_key || !rdev->ops->set_default_key) return -EOPNOTSUPP; if (wdev->valid_links) return -EOPNOTSUPP; switch (ext->alg) { case IW_ENCODE_ALG_NONE: remove = true; cipher = 0; break; case IW_ENCODE_ALG_WEP: if (ext->key_len == 5) cipher = WLAN_CIPHER_SUITE_WEP40; else if (ext->key_len == 13) cipher = WLAN_CIPHER_SUITE_WEP104; else return -EINVAL; break; case IW_ENCODE_ALG_TKIP: cipher = WLAN_CIPHER_SUITE_TKIP; break; case IW_ENCODE_ALG_CCMP: cipher = WLAN_CIPHER_SUITE_CCMP; break; case IW_ENCODE_ALG_AES_CMAC: cipher = WLAN_CIPHER_SUITE_AES_CMAC; break; default: return -EOPNOTSUPP; } if (erq->flags & IW_ENCODE_DISABLED) remove = true; idx = erq->flags & IW_ENCODE_INDEX; if (cipher == 
WLAN_CIPHER_SUITE_AES_CMAC) { if (idx < 4 || idx > 5) { idx = wdev->wext.default_mgmt_key; if (idx < 0) return -EINVAL; } else idx--; } else { if (idx < 1 || idx > 4) { idx = wdev->wext.default_key; if (idx < 0) return -EINVAL; } else idx--; } addr = ext->addr.sa_data; if (is_broadcast_ether_addr(addr)) addr = NULL; memset(&params, 0, sizeof(params)); params.key = ext->key; params.key_len = ext->key_len; params.cipher = cipher; if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { params.seq = ext->rx_seq; params.seq_len = 6; } guard(wiphy)(wdev->wiphy); return cfg80211_set_encryption(rdev, dev, !(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY), addr, remove, ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, idx, &params); } static int cfg80211_wext_giwencode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { struct iw_point *erq = &wrqu->encoding; struct wireless_dev *wdev = dev->ieee80211_ptr; int idx; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; idx = erq->flags & IW_ENCODE_INDEX; if (idx == 0) { idx = wdev->wext.default_key; if (idx < 0) idx = 0; } else if (idx < 1 || idx > 4) return -EINVAL; else idx--; erq->flags = idx + 1; if (!wdev->wext.keys || !wdev->wext.keys->params[idx].cipher) { erq->flags |= IW_ENCODE_DISABLED; erq->length = 0; return 0; } erq->length = min_t(size_t, erq->length, wdev->wext.keys->params[idx].key_len); memcpy(keybuf, wdev->wext.keys->params[idx].key, erq->length); erq->flags |= IW_ENCODE_ENABLED; return 0; } static int cfg80211_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_freq *wextfreq = &wrqu->freq; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_chan_def chandef = { .width = NL80211_CHAN_WIDTH_20_NOHT, }; int freq; guard(wiphy)(&rdev->wiphy); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra); case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); case NL80211_IFTYPE_MONITOR: freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq == 0) return -EINVAL; chandef.center_freq1 = freq; chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (!chandef.chan) return -EINVAL; return cfg80211_set_monitor_channel(rdev, dev, &chandef); case NL80211_IFTYPE_MESH_POINT: freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq == 0) return -EINVAL; chandef.center_freq1 = freq; chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (!chandef.chan) return -EINVAL; return cfg80211_set_mesh_channel(rdev, wdev, &chandef); default: return -EOPNOTSUPP; } } static int cfg80211_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_freq *freq = &wrqu->freq; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_chan_def chandef = {}; int ret; guard(wiphy)(&rdev->wiphy); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra); case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); case NL80211_IFTYPE_MONITOR: if (!rdev->ops->get_channel) return -EINVAL; ret = rdev_get_channel(rdev, wdev, 0, &chandef); if (ret) return ret; freq->m = chandef.chan->center_freq; freq->e = 6; return ret; 
default: return -EINVAL; } } static int cfg80211_wext_siwtxpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); enum nl80211_tx_power_setting type; int dbm = 0; if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) return -EINVAL; if (data->txpower.flags & IW_TXPOW_RANGE) return -EINVAL; if (!rdev->ops->set_tx_power) return -EOPNOTSUPP; /* only change when not disabling */ if (!data->txpower.disabled) { rfkill_set_sw_state(rdev->wiphy.rfkill, false); if (data->txpower.fixed) { /* * wext doesn't support negative values, see * below where it's for automatic */ if (data->txpower.value < 0) return -EINVAL; dbm = data->txpower.value; type = NL80211_TX_POWER_FIXED; /* TODO: do regulatory check! */ } else { /* * Automatic power level setting, max being the value * passed in from userland. */ if (data->txpower.value < 0) { type = NL80211_TX_POWER_AUTOMATIC; } else { dbm = data->txpower.value; type = NL80211_TX_POWER_LIMITED; } } } else { if (rfkill_set_sw_state(rdev->wiphy.rfkill, true)) schedule_work(&rdev->rfkill_block); return 0; } guard(wiphy)(&rdev->wiphy); return rdev_set_tx_power(rdev, wdev, -1, type, DBM_TO_MBM(dbm)); } static int cfg80211_wext_giwtxpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int err, val; if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) return -EINVAL; if (data->txpower.flags & IW_TXPOW_RANGE) return -EINVAL; if (!rdev->ops->get_tx_power) return -EOPNOTSUPP; scoped_guard(wiphy, &rdev->wiphy) { err = rdev_get_tx_power(rdev, wdev, -1, 0, &val); } if (err) return err; /* well... 
oh well */ data->txpower.fixed = 1; data->txpower.disabled = rfkill_blocked(rdev->wiphy.rfkill); data->txpower.value = val; data->txpower.flags = IW_TXPOW_DBM; return 0; } static int cfg80211_set_auth_alg(struct wireless_dev *wdev, s32 auth_alg) { int nr_alg = 0; if (!auth_alg) return -EINVAL; if (auth_alg & ~(IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY | IW_AUTH_ALG_LEAP)) return -EINVAL; if (auth_alg & IW_AUTH_ALG_OPEN_SYSTEM) { nr_alg++; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; } if (auth_alg & IW_AUTH_ALG_SHARED_KEY) { nr_alg++; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_SHARED_KEY; } if (auth_alg & IW_AUTH_ALG_LEAP) { nr_alg++; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; } if (nr_alg > 1) wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; return 0; } static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions) { if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA | IW_AUTH_WPA_VERSION_WPA2| IW_AUTH_WPA_VERSION_DISABLED)) return -EINVAL; if ((wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) && (wpa_versions & (IW_AUTH_WPA_VERSION_WPA| IW_AUTH_WPA_VERSION_WPA2))) return -EINVAL; if (wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) wdev->wext.connect.crypto.wpa_versions &= ~(NL80211_WPA_VERSION_1|NL80211_WPA_VERSION_2); if (wpa_versions & IW_AUTH_WPA_VERSION_WPA) wdev->wext.connect.crypto.wpa_versions |= NL80211_WPA_VERSION_1; if (wpa_versions & IW_AUTH_WPA_VERSION_WPA2) wdev->wext.connect.crypto.wpa_versions |= NL80211_WPA_VERSION_2; return 0; } static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher) { if (cipher & IW_AUTH_CIPHER_WEP40) wdev->wext.connect.crypto.cipher_group = WLAN_CIPHER_SUITE_WEP40; else if (cipher & IW_AUTH_CIPHER_WEP104) wdev->wext.connect.crypto.cipher_group = WLAN_CIPHER_SUITE_WEP104; else if (cipher & IW_AUTH_CIPHER_TKIP) wdev->wext.connect.crypto.cipher_group = WLAN_CIPHER_SUITE_TKIP; else if (cipher & IW_AUTH_CIPHER_CCMP) wdev->wext.connect.crypto.cipher_group = WLAN_CIPHER_SUITE_CCMP; else if (cipher & IW_AUTH_CIPHER_AES_CMAC) wdev->wext.connect.crypto.cipher_group = WLAN_CIPHER_SUITE_AES_CMAC; else if (cipher & IW_AUTH_CIPHER_NONE) wdev->wext.connect.crypto.cipher_group = 0; else return -EINVAL; return 0; } static int cfg80211_set_cipher_pairwise(struct wireless_dev *wdev, u32 cipher) { int nr_ciphers = 0; u32 *ciphers_pairwise = wdev->wext.connect.crypto.ciphers_pairwise; if (cipher & IW_AUTH_CIPHER_WEP40) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP40; nr_ciphers++; } if (cipher & IW_AUTH_CIPHER_WEP104) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP104; nr_ciphers++; } if (cipher & IW_AUTH_CIPHER_TKIP) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_TKIP; nr_ciphers++; } if (cipher & IW_AUTH_CIPHER_CCMP) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_CCMP; nr_ciphers++; } if (cipher & IW_AUTH_CIPHER_AES_CMAC) { ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_AES_CMAC; nr_ciphers++; } BUILD_BUG_ON(NL80211_MAX_NR_CIPHER_SUITES < 5); wdev->wext.connect.crypto.n_ciphers_pairwise = nr_ciphers; return 0; } static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt) { int nr_akm_suites = 0; if (key_mgt & ~(IW_AUTH_KEY_MGMT_802_1X | IW_AUTH_KEY_MGMT_PSK)) return -EINVAL; if (key_mgt & IW_AUTH_KEY_MGMT_802_1X) { wdev->wext.connect.crypto.akm_suites[nr_akm_suites] = WLAN_AKM_SUITE_8021X; nr_akm_suites++; } if (key_mgt & IW_AUTH_KEY_MGMT_PSK) { wdev->wext.connect.crypto.akm_suites[nr_akm_suites] = WLAN_AKM_SUITE_PSK; nr_akm_suites++; } 
wdev->wext.connect.crypto.n_akm_suites = nr_akm_suites; return 0; } static int cfg80211_wext_siwauth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *data = &wrqu->param; struct wireless_dev *wdev = dev->ieee80211_ptr; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; switch (data->flags & IW_AUTH_INDEX) { case IW_AUTH_PRIVACY_INVOKED: wdev->wext.connect.privacy = data->value; return 0; case IW_AUTH_WPA_VERSION: return cfg80211_set_wpa_version(wdev, data->value); case IW_AUTH_CIPHER_GROUP: return cfg80211_set_cipher_group(wdev, data->value); case IW_AUTH_KEY_MGMT: return cfg80211_set_key_mgt(wdev, data->value); case IW_AUTH_CIPHER_PAIRWISE: return cfg80211_set_cipher_pairwise(wdev, data->value); case IW_AUTH_80211_AUTH_ALG: return cfg80211_set_auth_alg(wdev, data->value); case IW_AUTH_WPA_ENABLED: case IW_AUTH_RX_UNENCRYPTED_EAPOL: case IW_AUTH_DROP_UNENCRYPTED: case IW_AUTH_MFP: return 0; default: return -EOPNOTSUPP; } } static int cfg80211_wext_giwauth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* XXX: what do we need? */ return -EOPNOTSUPP; } static int cfg80211_wext_siwpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *wrq = &wrqu->power; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); bool ps; int timeout = wdev->ps_timeout; int err; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EINVAL; if (!rdev->ops->set_power_mgmt) return -EOPNOTSUPP; if (wrq->disabled) { ps = false; } else { switch (wrq->flags & IW_POWER_MODE) { case IW_POWER_ON: /* If not specified */ case IW_POWER_MODE: /* If set all mask */ case IW_POWER_ALL_R: /* If explicitly state all */ ps = true; break; default: /* Otherwise we ignore */ return -EINVAL; } if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT)) return -EINVAL; if (wrq->flags & IW_POWER_TIMEOUT) timeout = wrq->value / 1000; } guard(wiphy)(&rdev->wiphy); err = rdev_set_power_mgmt(rdev, dev, ps, timeout); if (err) return err; wdev->ps = ps; wdev->ps_timeout = timeout; return 0; } static int cfg80211_wext_giwpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *wrq = &wrqu->power; struct wireless_dev *wdev = dev->ieee80211_ptr; wrq->disabled = !wdev->ps; return 0; } static int cfg80211_wext_siwrate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rate = &wrqu->bitrate; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_bitrate_mask mask; u32 fixed, maxrate; struct ieee80211_supported_band *sband; bool match = false; int band, ridx; if (!rdev->ops->set_bitrate_mask) return -EOPNOTSUPP; memset(&mask, 0, sizeof(mask)); fixed = 0; maxrate = (u32)-1; if (rate->value < 0) { /* nothing */ } else if (rate->fixed) { fixed = rate->value / 100000; } else { maxrate = rate->value / 100000; } for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wdev->wiphy->bands[band]; if (sband == NULL) continue; for (ridx = 0; ridx < sband->n_bitrates; ridx++) { struct ieee80211_rate *srate = &sband->bitrates[ridx]; if (fixed == srate->bitrate) { mask.control[band].legacy = 1 << ridx; match = true; break; } if (srate->bitrate <= maxrate) { mask.control[band].legacy |= 1 << ridx; match = true; } } } if (!match) return 
-EINVAL; guard(wiphy)(&rdev->wiphy); if (dev->ieee80211_ptr->valid_links) return -EOPNOTSUPP; return rdev_set_bitrate_mask(rdev, dev, 0, NULL, &mask); } static int cfg80211_wext_giwrate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rate = &wrqu->bitrate; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct station_info sinfo = {}; u8 addr[ETH_ALEN]; int err; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!rdev->ops->get_station) return -EOPNOTSUPP; err = 0; if (!wdev->valid_links && wdev->links[0].client.current_bss) memcpy(addr, wdev->links[0].client.current_bss->pub.bssid, ETH_ALEN); else err = -EOPNOTSUPP; if (err) return err; scoped_guard(wiphy, &rdev->wiphy) { err = rdev_get_station(rdev, dev, addr, &sinfo); } if (err) return err; if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) { err = -EOPNOTSUPP; goto free; } rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate); free: cfg80211_sinfo_release_content(&sinfo); return err; } /* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); /* we are under RTNL - globally locked - so can use static structs */ static struct iw_statistics wstats; static struct station_info sinfo = {}; u8 bssid[ETH_ALEN]; int ret; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) return NULL; if (!rdev->ops->get_station) return NULL; /* Grab BSSID of current BSS, if any */ wiphy_lock(&rdev->wiphy); if (wdev->valid_links || !wdev->links[0].client.current_bss) { wiphy_unlock(&rdev->wiphy); return NULL; } memcpy(bssid, wdev->links[0].client.current_bss->pub.bssid, ETH_ALEN); memset(&sinfo, 0, sizeof(sinfo)); ret = rdev_get_station(rdev, dev, bssid, &sinfo); wiphy_unlock(&rdev->wiphy); if (ret) return NULL; memset(&wstats, 0, sizeof(wstats)); switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) { int sig = sinfo.signal; wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; wstats.qual.updated |= IW_QUAL_DBM; wstats.qual.level = sig; if (sig < -110) sig = -110; else if (sig > -40) sig = -40; wstats.qual.qual = sig + 110; break; } fallthrough; case CFG80211_SIGNAL_TYPE_UNSPEC: if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) { wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; wstats.qual.level = sinfo.signal; wstats.qual.qual = sinfo.signal; break; } fallthrough; default: wstats.qual.updated |= IW_QUAL_LEVEL_INVALID; wstats.qual.updated |= IW_QUAL_QUAL_INVALID; } wstats.qual.updated |= IW_QUAL_NOISE_INVALID; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC)) wstats.discard.misc = sinfo.rx_dropped_misc; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED)) wstats.discard.retries = sinfo.tx_failed; cfg80211_sinfo_release_content(&sinfo); return &wstats; } static int cfg80211_wext_siwap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct sockaddr *ap_addr = &wrqu->ap_addr; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); guard(wiphy)(&rdev->wiphy); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: 
return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra); case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra); default: return -EOPNOTSUPP; } } static int cfg80211_wext_giwap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct sockaddr *ap_addr = &wrqu->ap_addr; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); guard(wiphy)(&rdev->wiphy); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra); case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra); default: return -EOPNOTSUPP; } } static int cfg80211_wext_siwessid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *ssid) { struct iw_point *data = &wrqu->data; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); guard(wiphy)(&rdev->wiphy); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_siwessid(dev, info, data, ssid); case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_siwessid(dev, info, data, ssid); default: return -EOPNOTSUPP; } } static int cfg80211_wext_giwessid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *ssid) { struct iw_point *data = &wrqu->data; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); data->flags = 0; data->length = 0; guard(wiphy)(&rdev->wiphy); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_giwessid(dev, info, data, ssid); default: return -EOPNOTSUPP; } } static int cfg80211_wext_siwpmksa(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_pmksa cfg_pmksa; struct iw_pmksa *pmksa = (struct iw_pmksa *)extra; memset(&cfg_pmksa, 0, sizeof(struct cfg80211_pmksa)); if (wdev->iftype != NL80211_IFTYPE_STATION) return -EINVAL; cfg_pmksa.bssid = pmksa->bssid.sa_data; cfg_pmksa.pmkid = pmksa->pmkid; guard(wiphy)(&rdev->wiphy); switch (pmksa->cmd) { case IW_PMKSA_ADD: if (!rdev->ops->set_pmksa) return -EOPNOTSUPP; return rdev_set_pmksa(rdev, dev, &cfg_pmksa); case IW_PMKSA_REMOVE: if (!rdev->ops->del_pmksa) return -EOPNOTSUPP; return rdev_del_pmksa(rdev, dev, &cfg_pmksa); case IW_PMKSA_FLUSH: if (!rdev->ops->flush_pmksa) return -EOPNOTSUPP; return rdev_flush_pmksa(rdev, dev); default: return -EOPNOTSUPP; } } static const iw_handler cfg80211_handlers[] = { IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname), IW_HANDLER(SIOCSIWFREQ, cfg80211_wext_siwfreq), IW_HANDLER(SIOCGIWFREQ, cfg80211_wext_giwfreq), IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode), IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode), IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange), IW_HANDLER(SIOCSIWAP, cfg80211_wext_siwap), IW_HANDLER(SIOCGIWAP, cfg80211_wext_giwap), IW_HANDLER(SIOCSIWMLME, cfg80211_wext_siwmlme), IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan), IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan), IW_HANDLER(SIOCSIWESSID, cfg80211_wext_siwessid), IW_HANDLER(SIOCGIWESSID, cfg80211_wext_giwessid), IW_HANDLER(SIOCSIWRATE, cfg80211_wext_siwrate), IW_HANDLER(SIOCGIWRATE, cfg80211_wext_giwrate), 
        IW_HANDLER(SIOCSIWRTS, cfg80211_wext_siwrts),
        IW_HANDLER(SIOCGIWRTS, cfg80211_wext_giwrts),
        IW_HANDLER(SIOCSIWFRAG, cfg80211_wext_siwfrag),
        IW_HANDLER(SIOCGIWFRAG, cfg80211_wext_giwfrag),
        IW_HANDLER(SIOCSIWTXPOW, cfg80211_wext_siwtxpower),
        IW_HANDLER(SIOCGIWTXPOW, cfg80211_wext_giwtxpower),
        IW_HANDLER(SIOCSIWRETRY, cfg80211_wext_siwretry),
        IW_HANDLER(SIOCGIWRETRY, cfg80211_wext_giwretry),
        IW_HANDLER(SIOCSIWENCODE, cfg80211_wext_siwencode),
        IW_HANDLER(SIOCGIWENCODE, cfg80211_wext_giwencode),
        IW_HANDLER(SIOCSIWPOWER, cfg80211_wext_siwpower),
        IW_HANDLER(SIOCGIWPOWER, cfg80211_wext_giwpower),
        IW_HANDLER(SIOCSIWGENIE, cfg80211_wext_siwgenie),
        IW_HANDLER(SIOCSIWAUTH, cfg80211_wext_siwauth),
        IW_HANDLER(SIOCGIWAUTH, cfg80211_wext_giwauth),
        IW_HANDLER(SIOCSIWENCODEEXT, cfg80211_wext_siwencodeext),
        IW_HANDLER(SIOCSIWPMKSA, cfg80211_wext_siwpmksa),
};

const struct iw_handler_def cfg80211_wext_handler = {
        .num_standard           = ARRAY_SIZE(cfg80211_handlers),
        .standard               = cfg80211_handlers,
        .get_wireless_stats     = cfg80211_wireless_stats,
};
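The table above is the complete wext-to-cfg80211 dispatch surface; it is exported as cfg80211_wext_handler so the generic wireless-extensions ioctl layer can find it. A minimal sketch of how such a table gets attached to a device follows; the function name my_attach_wext is hypothetical, and in mainline it is cfg80211's own netdev registration path that performs this assignment when CONFIG_CFG80211_WEXT is enabled.

#ifdef CONFIG_CFG80211_WEXT
/* Hypothetical sketch, not mainline code: point a net_device at the
 * handler table above so the wext ioctl core can dispatch SIOCxIW*
 * requests to the cfg80211 compat handlers.
 */
static void my_attach_wext(struct net_device *dev)
{
        dev->wireless_handlers = &cfg80211_wext_handler;
}
#endif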
// SPDX-License-Identifier: GPL-2.0-only
/* tunnel4.c: Generic IP tunnel transformer.
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mpls.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/xfrm.h>

static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
static struct xfrm_tunnel __rcu *tunnelmpls4_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);

static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
{
        return (family == AF_INET) ? &tunnel4_handlers :
                (family == AF_INET6) ? &tunnel64_handlers :
                &tunnelmpls4_handlers;
}

int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
        struct xfrm_tunnel __rcu **pprev;
        struct xfrm_tunnel *t;
        int ret = -EEXIST;
        int priority = handler->priority;

        mutex_lock(&tunnel4_mutex);

        for (pprev = fam_handlers(family);
             (t = rcu_dereference_protected(*pprev,
                        lockdep_is_held(&tunnel4_mutex))) != NULL;
             pprev = &t->next) {
                if (t->priority > priority)
                        break;
                if (t->priority == priority)
                        goto err;
        }

        handler->next = *pprev;
        rcu_assign_pointer(*pprev, handler);

        ret = 0;

err:
        mutex_unlock(&tunnel4_mutex);
        return ret;
}
EXPORT_SYMBOL(xfrm4_tunnel_register);

int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
        struct xfrm_tunnel __rcu **pprev;
        struct xfrm_tunnel *t;
        int ret = -ENOENT;

        mutex_lock(&tunnel4_mutex);

        for (pprev = fam_handlers(family);
             (t = rcu_dereference_protected(*pprev,
                        lockdep_is_held(&tunnel4_mutex))) != NULL;
             pprev = &t->next) {
                if (t == handler) {
                        *pprev = handler->next;
                        ret = 0;
                        break;
                }
        }

        mutex_unlock(&tunnel4_mutex);

        synchronize_net();

        return ret;
}
EXPORT_SYMBOL(xfrm4_tunnel_deregister);

#define for_each_tunnel_rcu(head, handler)              \
        for (handler = rcu_dereference(head);           \
             handler != NULL;                           \
             handler = rcu_dereference(handler->next))  \

static int tunnel4_rcv(struct sk_buff *skb)
{
        struct xfrm_tunnel *handler;

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto drop;

        for_each_tunnel_rcu(tunnel4_handlers, handler)
                if (!handler->handler(skb))
                        return 0;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
        kfree_skb(skb);
        return 0;
}

#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
static int tunnel4_rcv_cb(struct sk_buff *skb, u8 proto, int err)
{
        struct xfrm_tunnel __rcu *head;
        struct xfrm_tunnel *handler;
        int ret;

        head = (proto == IPPROTO_IPIP) ? tunnel4_handlers : tunnel64_handlers;

        for_each_tunnel_rcu(head, handler) {
                if (handler->cb_handler) {
                        ret = handler->cb_handler(skb, err);
                        if (ret <= 0)
                                return ret;
                }
        }

        return 0;
}

static const struct xfrm_input_afinfo tunnel4_input_afinfo = {
        .family         = AF_INET,
        .is_ipip        = true,
        .callback       = tunnel4_rcv_cb,
};
#endif

#if IS_ENABLED(CONFIG_IPV6)
static int tunnel64_rcv(struct sk_buff *skb)
{
        struct xfrm_tunnel *handler;

        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto drop;

        for_each_tunnel_rcu(tunnel64_handlers, handler)
                if (!handler->handler(skb))
                        return 0;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
        kfree_skb(skb);
        return 0;
}
#endif

#if IS_ENABLED(CONFIG_MPLS)
static int tunnelmpls4_rcv(struct sk_buff *skb)
{
        struct xfrm_tunnel *handler;

        if (!pskb_may_pull(skb, sizeof(struct mpls_label)))
                goto drop;

        for_each_tunnel_rcu(tunnelmpls4_handlers, handler)
                if (!handler->handler(skb))
                        return 0;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
        kfree_skb(skb);
        return 0;
}
#endif

static int tunnel4_err(struct sk_buff *skb, u32 info)
{
        struct xfrm_tunnel *handler;

        for_each_tunnel_rcu(tunnel4_handlers, handler)
                if (!handler->err_handler(skb, info))
                        return 0;

        return -ENOENT;
}

#if IS_ENABLED(CONFIG_IPV6)
static int tunnel64_err(struct sk_buff *skb, u32 info)
{
        struct xfrm_tunnel *handler;

        for_each_tunnel_rcu(tunnel64_handlers, handler)
                if (!handler->err_handler(skb, info))
                        return 0;

        return -ENOENT;
}
#endif

#if IS_ENABLED(CONFIG_MPLS)
static int tunnelmpls4_err(struct sk_buff *skb, u32 info)
{
        struct xfrm_tunnel *handler;

        for_each_tunnel_rcu(tunnelmpls4_handlers, handler)
                if (!handler->err_handler(skb, info))
                        return 0;

        return -ENOENT;
}
#endif

static const struct net_protocol tunnel4_protocol = {
        .handler        = tunnel4_rcv,
        .err_handler    = tunnel4_err,
        .no_policy      = 1,
};

#if IS_ENABLED(CONFIG_IPV6)
static const struct net_protocol tunnel64_protocol = {
        .handler        = tunnel64_rcv,
        .err_handler    = tunnel64_err,
        .no_policy      = 1,
};
#endif

#if IS_ENABLED(CONFIG_MPLS)
static const struct net_protocol tunnelmpls4_protocol = {
        .handler        = tunnelmpls4_rcv,
        .err_handler    = tunnelmpls4_err,
        .no_policy      = 1,
};
#endif

static int __init tunnel4_init(void)
{
        if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP))
                goto err;
#if IS_ENABLED(CONFIG_IPV6)
        if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
                inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
                goto err;
        }
#endif
#if IS_ENABLED(CONFIG_MPLS)
        if (inet_add_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS)) {
                inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
#if IS_ENABLED(CONFIG_IPV6)
                inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6);
#endif
                goto err;
        }
#endif
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
        if (xfrm_input_register_afinfo(&tunnel4_input_afinfo)) {
                inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
#if IS_ENABLED(CONFIG_IPV6)
                inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6);
#endif
#if IS_ENABLED(CONFIG_MPLS)
                inet_del_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS);
#endif
                goto err;
        }
#endif
        return 0;

err:
        pr_err("%s: can't add protocol\n", __func__);
        return -EAGAIN;
}

static void __exit tunnel4_fini(void)
{
#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
        if (xfrm_input_unregister_afinfo(&tunnel4_input_afinfo))
                pr_err("tunnel4 close: can't remove input afinfo\n");
#endif
#if IS_ENABLED(CONFIG_MPLS)
        if (inet_del_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS))
                pr_err("tunnelmpls4 close: can't remove protocol\n");
#endif
#if IS_ENABLED(CONFIG_IPV6)
        if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
                pr_err("tunnel64 close: can't remove protocol\n");
#endif
        if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP))
                pr_err("tunnel4 close: can't remove protocol\n");
}

module_init(tunnel4_init);
module_exit(tunnel4_fini);
MODULE_DESCRIPTION("IPv4 XFRM tunnel library");
MODULE_LICENSE("GPL");
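xfrm4_tunnel_register() keeps each family's handler list sorted by ascending priority and rejects a duplicate priority with -EEXIST, and tunnel4_rcv() offers the skb to each handler in turn until one returns 0. A minimal client sketch follows, loosely modeled on how the ipip driver hooks in; all my_* names are hypothetical, not mainline code.

/* Hypothetical sketch of a tunnel4 client. A real ->handler returns 0
 * once it has consumed the skb; nonzero lets lower-priority handlers try.
 */
static int my_tunnel_rcv(struct sk_buff *skb)
{
        /* ... decapsulate here; return 0 if the packet was ours ... */
        return -1;      /* not ours: fall through to the next handler */
}

static int my_tunnel_err(struct sk_buff *skb, u32 info)
{
        return -ENOENT;
}

static struct xfrm_tunnel my_tunnel_handler __read_mostly = {
        .handler        = my_tunnel_rcv,
        .err_handler    = my_tunnel_err,
        .priority       = 1,    /* lower value runs earlier */
};

static int __init my_tunnel_init(void)
{
        /* fails with -EEXIST if this priority is already taken */
        return xfrm4_tunnel_register(&my_tunnel_handler, AF_INET);
}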
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/slab.h> /* struct kmem_cache */

struct inotify_event_info {
        struct fsnotify_event fse;
        u32 mask;
        int wd;
        u32 sync_cookie;
        int name_len;
        char name[];
};

struct inotify_inode_mark {
        struct fsnotify_mark fsn_mark;
        int wd;
};

static inline struct inotify_event_info *INOTIFY_E(struct fsnotify_event *fse)
{
        return container_of(fse, struct inotify_event_info, fse);
}

/*
 * INOTIFY_USER_FLAGS represents all of the mask bits that we expose to
 * userspace. There is at least one bit (FS_EVENT_ON_CHILD) which is
 * used only internally to the kernel.
 */
#define INOTIFY_USER_MASK (IN_ALL_EVENTS)

static inline __u32 inotify_mark_user_mask(struct fsnotify_mark *fsn_mark)
{
        __u32 mask = fsn_mark->mask & INOTIFY_USER_MASK;

        if (fsn_mark->flags & FSNOTIFY_MARK_FLAG_EXCL_UNLINK)
                mask |= IN_EXCL_UNLINK;
        if (fsn_mark->flags & FSNOTIFY_MARK_FLAG_IN_ONESHOT)
                mask |= IN_ONESHOT;

        return mask;
}

extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
                                           struct fsnotify_group *group);
extern int inotify_handle_inode_event(struct fsnotify_mark *inode_mark,
                                      u32 mask, struct inode *inode,
                                      struct inode *dir,
                                      const struct qstr *name, u32 cookie);

extern const struct fsnotify_ops inotify_fsnotify_ops;
extern struct kmem_cache *inotify_inode_mark_cachep;

#ifdef CONFIG_INOTIFY_USER
static inline void dec_inotify_instances(struct ucounts *ucounts)
{
        dec_ucount(ucounts, UCOUNT_INOTIFY_INSTANCES);
}

static inline struct ucounts *inc_inotify_watches(struct ucounts *ucounts)
{
        return inc_ucount(ucounts->ns, ucounts->uid, UCOUNT_INOTIFY_WATCHES);
}

static inline void dec_inotify_watches(struct ucounts *ucounts)
{
        dec_ucount(ucounts, UCOUNT_INOTIFY_WATCHES);
}
#endif
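Both wrapper structs above embed the generic fsnotify object as their first member, so INOTIFY_E() is a plain container_of(), and inotify_mark_user_mask() folds mark flags back into the IN_* bit namespace that userspace sees. A small hypothetical illustration of using these helpers together (my_dump_inotify_view is not part of this header):

/* Hypothetical illustration only: given a generic fsnotify event and
 * the mark it fired on, recover the inotify-specific view of both.
 */
static void my_dump_inotify_view(struct fsnotify_event *fse,
                                 struct fsnotify_mark *fsn_mark)
{
        struct inotify_event_info *event = INOTIFY_E(fse);
        __u32 user_mask = inotify_mark_user_mask(fsn_mark);

        /* event->wd is the descriptor userspace got back from
         * inotify_add_watch(); user_mask adds IN_EXCL_UNLINK and
         * IN_ONESHOT back on top of the stored event bits. */
        pr_debug("wd %d mask %x user_mask %x\n",
                 event->wd, event->mask, user_mask);
}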
32 32 32 32 16 16 16 16 16 16 32 32 32 32 32 32 32 32 32 32 32 32 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 // SPDX-License-Identifier: GPL-2.0 #include <linux/ceph/ceph_debug.h> #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/ceph/types.h> #include <linux/ceph/decode.h> #include <linux/ceph/libceph.h> #include <linux/ceph/messenger.h> #include "auth_none.h" #include "auth_x.h" /* * get protocol handler */ static u32 supported_protocols[] = { CEPH_AUTH_NONE, CEPH_AUTH_CEPHX }; static int init_protocol(struct ceph_auth_client *ac, int proto) { dout("%s proto %d\n", __func__, proto); switch (proto) { case CEPH_AUTH_NONE: return ceph_auth_none_init(ac); case CEPH_AUTH_CEPHX: return ceph_x_init(ac); default: pr_err("bad auth protocol %d\n", proto); return -EINVAL; } } void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id) { dout("%s global_id %llu\n", __func__, global_id); if (!global_id) pr_err("got zero global_id\n"); if (ac->global_id && global_id != ac->global_id) pr_err("global_id changed from 
%llu to %llu\n", ac->global_id, global_id); ac->global_id = global_id; } /* * setup, teardown. */ struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_crypto_key *key, const int *con_modes) { struct ceph_auth_client *ac; ac = kzalloc(sizeof(*ac), GFP_NOFS); if (!ac) return ERR_PTR(-ENOMEM); mutex_init(&ac->mutex); ac->negotiating = true; if (name) ac->name = name; else ac->name = CEPH_AUTH_NAME_DEFAULT; ac->key = key; ac->preferred_mode = con_modes[0]; ac->fallback_mode = con_modes[1]; dout("%s name '%s' preferred_mode %d fallback_mode %d\n", __func__, ac->name, ac->preferred_mode, ac->fallback_mode); return ac; } void ceph_auth_destroy(struct ceph_auth_client *ac) { dout("auth_destroy %p\n", ac); if (ac->ops) ac->ops->destroy(ac); kfree(ac); } /* * Reset occurs when reconnecting to the monitor. */ void ceph_auth_reset(struct ceph_auth_client *ac) { mutex_lock(&ac->mutex); dout("auth_reset %p\n", ac); if (ac->ops && !ac->negotiating) ac->ops->reset(ac); ac->negotiating = true; mutex_unlock(&ac->mutex); } /* * EntityName, not to be confused with entity_name_t */ int ceph_auth_entity_name_encode(const char *name, void **p, void *end) { int len = strlen(name); if (*p + 2*sizeof(u32) + len > end) return -ERANGE; ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT); ceph_encode_32(p, len); ceph_encode_copy(p, name, len); return 0; } /* * Initiate protocol negotiation with monitor. Include entity name * and list supported protocols. */ int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len) { struct ceph_mon_request_header *monhdr = buf; void *p = monhdr + 1, *end = buf + len, *lenp; int i, num; int ret; mutex_lock(&ac->mutex); dout("auth_build_hello\n"); monhdr->have_version = 0; monhdr->session_mon = cpu_to_le16(-1); monhdr->session_mon_tid = 0; ceph_encode_32(&p, CEPH_AUTH_UNKNOWN); /* no protocol, yet */ lenp = p; p += sizeof(u32); ceph_decode_need(&p, end, 1 + sizeof(u32), bad); ceph_encode_8(&p, 1); num = ARRAY_SIZE(supported_protocols); ceph_encode_32(&p, num); ceph_decode_need(&p, end, num * sizeof(u32), bad); for (i = 0; i < num; i++) ceph_encode_32(&p, supported_protocols[i]); ret = ceph_auth_entity_name_encode(ac->name, &p, end); if (ret < 0) goto out; ceph_decode_need(&p, end, sizeof(u64), bad); ceph_encode_64(&p, ac->global_id); ceph_encode_32(&lenp, p - lenp - sizeof(u32)); ret = p - buf; out: mutex_unlock(&ac->mutex); return ret; bad: ret = -ERANGE; goto out; } static int build_request(struct ceph_auth_client *ac, bool add_header, void *buf, int buf_len) { void *end = buf + buf_len; void *p; int ret; p = buf; if (add_header) { /* struct ceph_mon_request_header + protocol */ ceph_encode_64_safe(&p, end, 0, e_range); ceph_encode_16_safe(&p, end, -1, e_range); ceph_encode_64_safe(&p, end, 0, e_range); ceph_encode_32_safe(&p, end, ac->protocol, e_range); } ceph_encode_need(&p, end, sizeof(u32), e_range); ret = ac->ops->build_request(ac, p + sizeof(u32), end); if (ret < 0) { pr_err("auth protocol '%s' building request failed: %d\n", ceph_auth_proto_name(ac->protocol), ret); return ret; } dout(" built request %d bytes\n", ret); ceph_encode_32(&p, ret); return p + ret - buf; e_range: return -ERANGE; } /* * Handle auth message from monitor. 
*/ int ceph_handle_auth_reply(struct ceph_auth_client *ac, void *buf, size_t len, void *reply_buf, size_t reply_len) { void *p = buf; void *end = buf + len; int protocol; s32 result; u64 global_id; void *payload, *payload_end; int payload_len; char *result_msg; int result_msg_len; int ret = -EINVAL; mutex_lock(&ac->mutex); dout("handle_auth_reply %p %p\n", p, end); ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad); protocol = ceph_decode_32(&p); result = ceph_decode_32(&p); global_id = ceph_decode_64(&p); payload_len = ceph_decode_32(&p); payload = p; p += payload_len; ceph_decode_need(&p, end, sizeof(u32), bad); result_msg_len = ceph_decode_32(&p); result_msg = p; p += result_msg_len; if (p != end) goto bad; dout(" result %d '%.*s' gid %llu len %d\n", result, result_msg_len, result_msg, global_id, payload_len); payload_end = payload + payload_len; if (ac->negotiating) { /* server does not support our protocols? */ if (!protocol && result < 0) { ret = result; goto out; } /* set up (new) protocol handler? */ if (ac->protocol && ac->protocol != protocol) { ac->ops->destroy(ac); ac->protocol = 0; ac->ops = NULL; } if (ac->protocol != protocol) { ret = init_protocol(ac, protocol); if (ret) { pr_err("auth protocol '%s' init failed: %d\n", ceph_auth_proto_name(protocol), ret); goto out; } } ac->negotiating = false; } if (result) { pr_err("auth protocol '%s' mauth authentication failed: %d\n", ceph_auth_proto_name(ac->protocol), result); ret = result; goto out; } ret = ac->ops->handle_reply(ac, global_id, payload, payload_end, NULL, NULL, NULL, NULL); if (ret == -EAGAIN) { ret = build_request(ac, true, reply_buf, reply_len); goto out; } else if (ret) { goto out; } out: mutex_unlock(&ac->mutex); return ret; bad: pr_err("failed to decode auth msg\n"); ret = -EINVAL; goto out; } int ceph_build_auth(struct ceph_auth_client *ac, void *msg_buf, size_t msg_len) { int ret = 0; mutex_lock(&ac->mutex); if (ac->ops->should_authenticate(ac)) ret = build_request(ac, true, msg_buf, msg_len); mutex_unlock(&ac->mutex); return ret; } int ceph_auth_is_authenticated(struct ceph_auth_client *ac) { int ret = 0; mutex_lock(&ac->mutex); if (ac->ops) ret = ac->ops->is_authenticated(ac); mutex_unlock(&ac->mutex); return ret; } EXPORT_SYMBOL(ceph_auth_is_authenticated); int __ceph_auth_get_authorizer(struct ceph_auth_client *ac, struct ceph_auth_handshake *auth, int peer_type, bool force_new, int *proto, int *pref_mode, int *fallb_mode) { int ret; mutex_lock(&ac->mutex); if (force_new && auth->authorizer) { ceph_auth_destroy_authorizer(auth->authorizer); auth->authorizer = NULL; } if (!auth->authorizer) ret = ac->ops->create_authorizer(ac, peer_type, auth); else if (ac->ops->update_authorizer) ret = ac->ops->update_authorizer(ac, peer_type, auth); else ret = 0; if (ret) goto out; *proto = ac->protocol; if (pref_mode && fallb_mode) { *pref_mode = ac->preferred_mode; *fallb_mode = ac->fallback_mode; } out: mutex_unlock(&ac->mutex); return ret; } EXPORT_SYMBOL(__ceph_auth_get_authorizer); void ceph_auth_destroy_authorizer(struct ceph_authorizer *a) { a->destroy(a); } EXPORT_SYMBOL(ceph_auth_destroy_authorizer); int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac, struct ceph_authorizer *a, void *challenge_buf, int challenge_buf_len) { int ret = 0; mutex_lock(&ac->mutex); if (ac->ops && ac->ops->add_authorizer_challenge) ret = ac->ops->add_authorizer_challenge(ac, a, challenge_buf, challenge_buf_len); mutex_unlock(&ac->mutex); return ret; } EXPORT_SYMBOL(ceph_auth_add_authorizer_challenge); int 
ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, void *reply, int reply_len, u8 *session_key, int *session_key_len, u8 *con_secret, int *con_secret_len) { int ret = 0; mutex_lock(&ac->mutex); if (ac->ops && ac->ops->verify_authorizer_reply) ret = ac->ops->verify_authorizer_reply(ac, a, reply, reply_len, session_key, session_key_len, con_secret, con_secret_len); mutex_unlock(&ac->mutex); return ret; } EXPORT_SYMBOL(ceph_auth_verify_authorizer_reply); void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) { mutex_lock(&ac->mutex); if (ac->ops && ac->ops->invalidate_authorizer) ac->ops->invalidate_authorizer(ac, peer_type); mutex_unlock(&ac->mutex); } EXPORT_SYMBOL(ceph_auth_invalidate_authorizer); /* * msgr2 authentication */ static bool contains(const int *arr, int cnt, int val) { int i; for (i = 0; i < cnt; i++) { if (arr[i] == val) return true; } return false; } static int encode_con_modes(void **p, void *end, int pref_mode, int fallb_mode) { WARN_ON(pref_mode == CEPH_CON_MODE_UNKNOWN); if (fallb_mode != CEPH_CON_MODE_UNKNOWN) { ceph_encode_32_safe(p, end, 2, e_range); ceph_encode_32_safe(p, end, pref_mode, e_range); ceph_encode_32_safe(p, end, fallb_mode, e_range); } else { ceph_encode_32_safe(p, end, 1, e_range); ceph_encode_32_safe(p, end, pref_mode, e_range); } return 0; e_range: return -ERANGE; } /* * Similar to ceph_auth_build_hello(). */ int ceph_auth_get_request(struct ceph_auth_client *ac, void *buf, int buf_len) { int proto = ac->key ? CEPH_AUTH_CEPHX : CEPH_AUTH_NONE; void *end = buf + buf_len; void *lenp; void *p; int ret; mutex_lock(&ac->mutex); if (ac->protocol == CEPH_AUTH_UNKNOWN) { ret = init_protocol(ac, proto); if (ret) { pr_err("auth protocol '%s' init failed: %d\n", ceph_auth_proto_name(proto), ret); goto out; } } else { WARN_ON(ac->protocol != proto); ac->ops->reset(ac); } p = buf; ceph_encode_32_safe(&p, end, ac->protocol, e_range); ret = encode_con_modes(&p, end, ac->preferred_mode, ac->fallback_mode); if (ret) goto out; lenp = p; p += 4; /* space for len */ ceph_encode_8_safe(&p, end, CEPH_AUTH_MODE_MON, e_range); ret = ceph_auth_entity_name_encode(ac->name, &p, end); if (ret) goto out; ceph_encode_64_safe(&p, end, ac->global_id, e_range); ceph_encode_32(&lenp, p - lenp - 4); ret = p - buf; out: mutex_unlock(&ac->mutex); return ret; e_range: ret = -ERANGE; goto out; } int ceph_auth_handle_reply_more(struct ceph_auth_client *ac, void *reply, int reply_len, void *buf, int buf_len) { int ret; mutex_lock(&ac->mutex); ret = ac->ops->handle_reply(ac, 0, reply, reply + reply_len, NULL, NULL, NULL, NULL); if (ret == -EAGAIN) ret = build_request(ac, false, buf, buf_len); else WARN_ON(ret >= 0); mutex_unlock(&ac->mutex); return ret; } int ceph_auth_handle_reply_done(struct ceph_auth_client *ac, u64 global_id, void *reply, int reply_len, u8 *session_key, int *session_key_len, u8 *con_secret, int *con_secret_len) { int ret; mutex_lock(&ac->mutex); ret = ac->ops->handle_reply(ac, global_id, reply, reply + reply_len, session_key, session_key_len, con_secret, con_secret_len); WARN_ON(ret == -EAGAIN || ret > 0); mutex_unlock(&ac->mutex); return ret; } bool ceph_auth_handle_bad_method(struct ceph_auth_client *ac, int used_proto, int result, const int *allowed_protos, int proto_cnt, const int *allowed_modes, int mode_cnt) { mutex_lock(&ac->mutex); WARN_ON(used_proto != ac->protocol); if (result == -EOPNOTSUPP) { if (!contains(allowed_protos, proto_cnt, ac->protocol)) { pr_err("auth protocol '%s' not allowed\n", 
ceph_auth_proto_name(ac->protocol)); goto not_allowed; } if (!contains(allowed_modes, mode_cnt, ac->preferred_mode) && (ac->fallback_mode == CEPH_CON_MODE_UNKNOWN || !contains(allowed_modes, mode_cnt, ac->fallback_mode))) { pr_err("preferred mode '%s' not allowed\n", ceph_con_mode_name(ac->preferred_mode)); if (ac->fallback_mode == CEPH_CON_MODE_UNKNOWN) pr_err("no fallback mode\n"); else pr_err("fallback mode '%s' not allowed\n", ceph_con_mode_name(ac->fallback_mode)); goto not_allowed; } } WARN_ON(result == -EOPNOTSUPP || result >= 0); pr_err("auth protocol '%s' msgr authentication failed: %d\n", ceph_auth_proto_name(ac->protocol), result); mutex_unlock(&ac->mutex); return true; not_allowed: mutex_unlock(&ac->mutex); return false; } int ceph_auth_get_authorizer(struct ceph_auth_client *ac, struct ceph_auth_handshake *auth, int peer_type, void *buf, int *buf_len) { void *end = buf + *buf_len; int pref_mode, fallb_mode; int proto; void *p; int ret; ret = __ceph_auth_get_authorizer(ac, auth, peer_type, true, &proto, &pref_mode, &fallb_mode); if (ret) return ret; p = buf; ceph_encode_32_safe(&p, end, proto, e_range); ret = encode_con_modes(&p, end, pref_mode, fallb_mode); if (ret) return ret; ceph_encode_32_safe(&p, end, auth->authorizer_buf_len, e_range); *buf_len = p - buf; return 0; e_range: return -ERANGE; } EXPORT_SYMBOL(ceph_auth_get_authorizer); int ceph_auth_handle_svc_reply_more(struct ceph_auth_client *ac, struct ceph_auth_handshake *auth, void *reply, int reply_len, void *buf, int *buf_len) { void *end = buf + *buf_len; void *p; int ret; ret = ceph_auth_add_authorizer_challenge(ac, auth->authorizer, reply, reply_len); if (ret) return ret; p = buf; ceph_encode_32_safe(&p, end, auth->authorizer_buf_len, e_range); *buf_len = p - buf; return 0; e_range: return -ERANGE; } EXPORT_SYMBOL(ceph_auth_handle_svc_reply_more); int ceph_auth_handle_svc_reply_done(struct ceph_auth_client *ac, struct ceph_auth_handshake *auth, void *reply, int reply_len, u8 *session_key, int *session_key_len, u8 *con_secret, int *con_secret_len) { return ceph_auth_verify_authorizer_reply(ac, auth->authorizer, reply, reply_len, session_key, session_key_len, con_secret, con_secret_len); } EXPORT_SYMBOL(ceph_auth_handle_svc_reply_done); bool ceph_auth_handle_bad_authorizer(struct ceph_auth_client *ac, int peer_type, int used_proto, int result, const int *allowed_protos, int proto_cnt, const int *allowed_modes, int mode_cnt) { mutex_lock(&ac->mutex); WARN_ON(used_proto != ac->protocol); if (result == -EOPNOTSUPP) { if (!contains(allowed_protos, proto_cnt, ac->protocol)) { pr_err("auth protocol '%s' not allowed by %s\n", ceph_auth_proto_name(ac->protocol), ceph_entity_type_name(peer_type)); goto not_allowed; } if (!contains(allowed_modes, mode_cnt, ac->preferred_mode) && (ac->fallback_mode == CEPH_CON_MODE_UNKNOWN || !contains(allowed_modes, mode_cnt, ac->fallback_mode))) { pr_err("preferred mode '%s' not allowed by %s\n", ceph_con_mode_name(ac->preferred_mode), ceph_entity_type_name(peer_type)); if (ac->fallback_mode == CEPH_CON_MODE_UNKNOWN) pr_err("no fallback mode\n"); else pr_err("fallback mode '%s' not allowed by %s\n", ceph_con_mode_name(ac->fallback_mode), ceph_entity_type_name(peer_type)); goto not_allowed; } } WARN_ON(result == -EOPNOTSUPP || result >= 0); pr_err("auth protocol '%s' authorization to %s failed: %d\n", ceph_auth_proto_name(ac->protocol), ceph_entity_type_name(peer_type), result); if (ac->ops->invalidate_authorizer) ac->ops->invalidate_authorizer(ac, peer_type); mutex_unlock(&ac->mutex); return 
true; not_allowed: mutex_unlock(&ac->mutex); return false; } EXPORT_SYMBOL(ceph_auth_handle_bad_authorizer);
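The length-backfill pattern in ceph_auth_get_request() above (reserve four bytes with lenp = p; p += 4;, encode the payload, then write p - lenp - 4 into the hole) is the part that is easiest to get wrong by one. A minimal standalone sketch of the same framing, with local stand-in helpers instead of the kernel's ceph_encode_*() macros; a little-endian host is assumed for brevity, and the entity-name encoding is elided:

/*
 * Standalone sketch, not kernel code: mirrors the frame layout built by
 * ceph_auth_get_request() - u32 protocol, u32 con-mode count plus modes,
 * then a length-prefixed payload whose u32 length is backfilled once the
 * end of the payload is known.
 */
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t **p, uint32_t v) { memcpy(*p, &v, 4); *p += 4; }
static void put_le64(uint8_t **p, uint64_t v) { memcpy(*p, &v, 8); *p += 8; }

static size_t build_auth_request(uint8_t *buf, uint32_t proto,
				 uint32_t pref_mode, uint32_t fallb_mode,
				 uint8_t auth_mode, uint64_t global_id)
{
	uint8_t *p = buf, *lenp;

	put_le32(&p, proto);		/* auth protocol id */
	put_le32(&p, 2);		/* two connection modes follow */
	put_le32(&p, pref_mode);
	put_le32(&p, fallb_mode);
	lenp = p;			/* remember where the length goes */
	p += 4;				/* space for len, filled in below */
	*p++ = auth_mode;		/* stands in for CEPH_AUTH_MODE_MON */
	/* entity name encoding elided */
	put_le64(&p, global_id);
	put_le32(&lenp, (uint32_t)(p - lenp - 4));	/* backfill length */
	return p - buf;
}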
// SPDX-License-Identifier: GPL-2.0 #include <linux/compat.h> #include <linux/console.h> #include <linux/fb.h> #include <linux/fbcon.h> #include <linux/major.h> #include "fb_internal.h" /* * We hold a reference to the fb_info in file->private_data, * but if the current registered fb has changed, we don't * actually want to use it. * * So look up the fb_info using the inode minor number, * and just verify it against the reference we have.
*/ static struct fb_info *file_fb_info(struct file *file) { struct inode *inode = file_inode(file); int fbidx = iminor(inode); struct fb_info *info = registered_fb[fbidx]; if (info != file->private_data) info = NULL; return info; } static ssize_t fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct fb_info *info = file_fb_info(file); if (!info) return -ENODEV; if (fb_WARN_ON_ONCE(info, !info->fbops->fb_read)) return -EINVAL; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; return info->fbops->fb_read(info, buf, count, ppos); } static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct fb_info *info = file_fb_info(file); if (!info) return -ENODEV; if (fb_WARN_ON_ONCE(info, !info->fbops->fb_write)) return -EINVAL; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; return info->fbops->fb_write(info, buf, count, ppos); } static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { const struct fb_ops *fb; struct fb_var_screeninfo var; struct fb_fix_screeninfo fix; struct fb_cmap cmap_from; struct fb_cmap_user cmap; void __user *argp = (void __user *)arg; long ret = 0; switch (cmd) { case FBIOGET_VSCREENINFO: lock_fb_info(info); var = info->var; unlock_fb_info(info); ret = copy_to_user(argp, &var, sizeof(var)) ? -EFAULT : 0; break; case FBIOPUT_VSCREENINFO: if (copy_from_user(&var, argp, sizeof(var))) return -EFAULT; /* only for kernel-internal use */ var.activate &= ~FB_ACTIVATE_KD_TEXT; console_lock(); lock_fb_info(info); ret = fbcon_modechange_possible(info, &var); if (!ret) ret = fb_set_var(info, &var); if (!ret) fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); unlock_fb_info(info); console_unlock(); if (!ret && copy_to_user(argp, &var, sizeof(var))) ret = -EFAULT; break; case FBIOGET_FSCREENINFO: lock_fb_info(info); memcpy(&fix, &info->fix, sizeof(fix)); if (info->flags & FBINFO_HIDE_SMEM_START) fix.smem_start = 0; unlock_fb_info(info); ret = copy_to_user(argp, &fix, sizeof(fix)) ? 
-EFAULT : 0; break; case FBIOPUTCMAP: if (copy_from_user(&cmap, argp, sizeof(cmap))) return -EFAULT; ret = fb_set_user_cmap(&cmap, info); break; case FBIOGETCMAP: if (copy_from_user(&cmap, argp, sizeof(cmap))) return -EFAULT; lock_fb_info(info); cmap_from = info->cmap; unlock_fb_info(info); ret = fb_cmap_to_user(&cmap_from, &cmap); break; case FBIOPAN_DISPLAY: if (copy_from_user(&var, argp, sizeof(var))) return -EFAULT; console_lock(); lock_fb_info(info); ret = fb_pan_display(info, &var); unlock_fb_info(info); console_unlock(); if (ret == 0 && copy_to_user(argp, &var, sizeof(var))) return -EFAULT; break; case FBIO_CURSOR: ret = -EINVAL; break; case FBIOGET_CON2FBMAP: ret = fbcon_get_con2fb_map_ioctl(argp); break; case FBIOPUT_CON2FBMAP: ret = fbcon_set_con2fb_map_ioctl(argp); break; case FBIOBLANK: if (arg > FB_BLANK_POWERDOWN) return -EINVAL; console_lock(); lock_fb_info(info); ret = fb_blank(info, arg); /* might again call into fb_blank */ fbcon_fb_blanked(info, arg); unlock_fb_info(info); console_unlock(); break; default: lock_fb_info(info); fb = info->fbops; if (fb->fb_ioctl) ret = fb->fb_ioctl(info, cmd, arg); else ret = -ENOTTY; unlock_fb_info(info); } return ret; } static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fb_info *info = file_fb_info(file); if (!info) return -ENODEV; return do_fb_ioctl(info, cmd, arg); } #ifdef CONFIG_COMPAT struct fb_fix_screeninfo32 { char id[16]; compat_caddr_t smem_start; u32 smem_len; u32 type; u32 type_aux; u32 visual; u16 xpanstep; u16 ypanstep; u16 ywrapstep; u32 line_length; compat_caddr_t mmio_start; u32 mmio_len; u32 accel; u16 reserved[3]; }; struct fb_cmap32 { u32 start; u32 len; compat_caddr_t red; compat_caddr_t green; compat_caddr_t blue; compat_caddr_t transp; }; static int fb_getput_cmap(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct fb_cmap32 cmap32; struct fb_cmap cmap_from; struct fb_cmap_user cmap; if (copy_from_user(&cmap32, compat_ptr(arg), sizeof(cmap32))) return -EFAULT; cmap = (struct fb_cmap_user) { .start = cmap32.start, .len = cmap32.len, .red = compat_ptr(cmap32.red), .green = compat_ptr(cmap32.green), .blue = compat_ptr(cmap32.blue), .transp = compat_ptr(cmap32.transp), }; if (cmd == FBIOPUTCMAP) return fb_set_user_cmap(&cmap, info); lock_fb_info(info); cmap_from = info->cmap; unlock_fb_info(info); return fb_cmap_to_user(&cmap_from, &cmap); } static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix, struct fb_fix_screeninfo32 __user *fix32) { __u32 data; int err; err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id)); data = (__u32) (unsigned long) fix->smem_start; err |= put_user(data, &fix32->smem_start); err |= put_user(fix->smem_len, &fix32->smem_len); err |= put_user(fix->type, &fix32->type); err |= put_user(fix->type_aux, &fix32->type_aux); err |= put_user(fix->visual, &fix32->visual); err |= put_user(fix->xpanstep, &fix32->xpanstep); err |= put_user(fix->ypanstep, &fix32->ypanstep); err |= put_user(fix->ywrapstep, &fix32->ywrapstep); err |= put_user(fix->line_length, &fix32->line_length); data = (__u32) (unsigned long) fix->mmio_start; err |= put_user(data, &fix32->mmio_start); err |= put_user(fix->mmio_len, &fix32->mmio_len); err |= put_user(fix->accel, &fix32->accel); err |= copy_to_user(fix32->reserved, fix->reserved, sizeof(fix->reserved)); if (err) return -EFAULT; return 0; } static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct fb_fix_screeninfo fix; lock_fb_info(info); fix = info->fix; if (info->flags 
& FBINFO_HIDE_SMEM_START) fix.smem_start = 0; unlock_fb_info(info); return do_fscreeninfo_to_user(&fix, compat_ptr(arg)); } static long fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fb_info *info = file_fb_info(file); const struct fb_ops *fb; long ret = -ENOIOCTLCMD; if (!info) return -ENODEV; fb = info->fbops; switch (cmd) { case FBIOGET_VSCREENINFO: case FBIOPUT_VSCREENINFO: case FBIOPAN_DISPLAY: case FBIOGET_CON2FBMAP: case FBIOPUT_CON2FBMAP: arg = (unsigned long) compat_ptr(arg); fallthrough; case FBIOBLANK: ret = do_fb_ioctl(info, cmd, arg); break; case FBIOGET_FSCREENINFO: ret = fb_get_fscreeninfo(info, cmd, arg); break; case FBIOGETCMAP: case FBIOPUTCMAP: ret = fb_getput_cmap(info, cmd, arg); break; default: if (fb->fb_compat_ioctl) ret = fb->fb_compat_ioctl(info, cmd, arg); break; } return ret; } #endif static int fb_mmap(struct file *file, struct vm_area_struct *vma) { struct fb_info *info = file_fb_info(file); int res; if (!info) return -ENODEV; if (fb_WARN_ON_ONCE(info, !info->fbops->fb_mmap)) return -ENODEV; mutex_lock(&info->mm_lock); res = info->fbops->fb_mmap(info, vma); mutex_unlock(&info->mm_lock); return res; } static int fb_open(struct inode *inode, struct file *file) __acquires(&info->lock) __releases(&info->lock) { int fbidx = iminor(inode); struct fb_info *info; int res = 0; info = get_fb_info(fbidx); if (!info) { request_module("fb%d", fbidx); info = get_fb_info(fbidx); if (!info) return -ENODEV; } if (IS_ERR(info)) return PTR_ERR(info); lock_fb_info(info); if (!try_module_get(info->fbops->owner)) { res = -ENODEV; goto out; } file->private_data = info; if (info->fbops->fb_open) { res = info->fbops->fb_open(info, 1); if (res) module_put(info->fbops->owner); } #ifdef CONFIG_FB_DEFERRED_IO if (info->fbdefio) fb_deferred_io_open(info, inode, file); #endif out: unlock_fb_info(info); if (res) put_fb_info(info); return res; } static int fb_release(struct inode *inode, struct file *file) __acquires(&info->lock) __releases(&info->lock) { struct fb_info * const info = file->private_data; lock_fb_info(info); #if IS_ENABLED(CONFIG_FB_DEFERRED_IO) if (info->fbdefio) fb_deferred_io_release(info); #endif if (info->fbops->fb_release) info->fbops->fb_release(info, 1); module_put(info->fbops->owner); unlock_fb_info(info); put_fb_info(info); return 0; } #if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU) static unsigned long get_fb_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct fb_info * const info = filp->private_data; unsigned long fb_size = PAGE_ALIGN(info->fix.smem_len); if (pgoff > fb_size || len > fb_size - pgoff) return -EINVAL; return (unsigned long)info->screen_base + pgoff; } #endif static const struct file_operations fb_fops = { .owner = THIS_MODULE, .read = fb_read, .write = fb_write, .unlocked_ioctl = fb_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = fb_compat_ioctl, #endif .mmap = fb_mmap, .open = fb_open, .release = fb_release, #if defined(HAVE_ARCH_FB_UNMAPPED_AREA) || \ (defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && \ !defined(CONFIG_MMU)) .get_unmapped_area = get_fb_unmapped_area, #endif #ifdef CONFIG_FB_DEFERRED_IO .fsync = fb_deferred_io_fsync, #endif .llseek = default_llseek, }; int fb_register_chrdev(void) { int ret; ret = register_chrdev(FB_MAJOR, "fb", &fb_fops); if (ret) { pr_err("Unable to get major %d for fb devs\n", FB_MAJOR); return ret; } return ret; } void fb_unregister_chrdev(void) { unregister_chrdev(FB_MAJOR, "fb"); }
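For reference, the FBIOGET_VSCREENINFO case of do_fb_ioctl() above is reachable from userspace with nothing beyond the UAPI header. A minimal sketch, assuming a framebuffer device exists at /dev/fb0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Served by do_fb_ioctl(): the snapshot of info->var is taken
	 * under lock_fb_info() and then copied out to userspace. */
	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) == 0)
		printf("%ux%u, %u bpp\n", var.xres, var.yres,
		       var.bits_per_pixel);
	close(fd);
	return 0;
}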
// SPDX-License-Identifier: GPL-2.0-only /* * Pid namespaces * * Authors: * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM * Many thanks to Oleg Nesterov for comments and help * */ #include <linux/pid.h> #include <linux/pid_namespace.h> #include <linux/user_namespace.h> #include <linux/syscalls.h> #include <linux/cred.h> #include <linux/err.h> #include <linux/acct.h> #include <linux/slab.h> #include <linux/proc_ns.h> #include <linux/reboot.h> #include <linux/export.h> #include <linux/sched/task.h> #include <linux/sched/signal.h> #include <linux/idr.h> #include <linux/nstree.h> #include <uapi/linux/wait.h> #include "pid_sysctl.h" static DEFINE_MUTEX(pid_caches_mutex); static struct kmem_cache *pid_ns_cachep; /* Write once array, filled from the beginning. */ static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL]; /* * creates the kmem cache to allocate pids from. * @level: pid namespace level */ static struct kmem_cache *create_pid_cachep(unsigned int level) { /* Level 0 is init_pid_ns.pid_cachep */ struct kmem_cache **pkc = &pid_cache[level - 1]; struct kmem_cache *kc; char name[4 + 10 + 1]; unsigned int len; kc = READ_ONCE(*pkc); if (kc) return kc; snprintf(name, sizeof(name), "pid_%u", level + 1); len = struct_size_t(struct pid, numbers, level + 1); mutex_lock(&pid_caches_mutex); /* A name collision forces us to do the allocation under the mutex.
*/ if (!*pkc) *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); mutex_unlock(&pid_caches_mutex); /* current can fail, but someone else can succeed. */ return READ_ONCE(*pkc); } static struct ucounts *inc_pid_namespaces(struct user_namespace *ns) { return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES); } static void dec_pid_namespaces(struct ucounts *ucounts) { dec_ucount(ucounts, UCOUNT_PID_NAMESPACES); } static void destroy_pid_namespace_work(struct work_struct *work); static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns, struct pid_namespace *parent_pid_ns) { struct pid_namespace *ns; unsigned int level = parent_pid_ns->level + 1; struct ucounts *ucounts; int err; err = -EINVAL; if (!in_userns(parent_pid_ns->user_ns, user_ns)) goto out; err = -ENOSPC; if (level > MAX_PID_NS_LEVEL) goto out; ucounts = inc_pid_namespaces(user_ns); if (!ucounts) goto out; err = -ENOMEM; ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL); if (ns == NULL) goto out_dec; idr_init(&ns->idr); ns->pid_cachep = create_pid_cachep(level); if (ns->pid_cachep == NULL) goto out_free_idr; err = ns_common_init(ns); if (err) goto out_free_idr; ns->pid_max = PID_MAX_LIMIT; err = register_pidns_sysctls(ns); if (err) goto out_free_inum; ns->level = level; ns->parent = get_pid_ns(parent_pid_ns); ns->user_ns = get_user_ns(user_ns); ns->ucounts = ucounts; ns->pid_allocated = PIDNS_ADDING; INIT_WORK(&ns->work, destroy_pid_namespace_work); #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) ns->memfd_noexec_scope = pidns_memfd_noexec_scope(parent_pid_ns); #endif ns_tree_add(ns); return ns; out_free_inum: ns_common_free(ns); out_free_idr: idr_destroy(&ns->idr); kmem_cache_free(pid_ns_cachep, ns); out_dec: dec_pid_namespaces(ucounts); out: return ERR_PTR(err); } static void delayed_free_pidns(struct rcu_head *p) { struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu); dec_pid_namespaces(ns->ucounts); put_user_ns(ns->user_ns); kmem_cache_free(pid_ns_cachep, ns); } static void destroy_pid_namespace(struct pid_namespace *ns) { ns_tree_remove(ns); unregister_pidns_sysctls(ns); ns_common_free(ns); idr_destroy(&ns->idr); call_rcu(&ns->rcu, delayed_free_pidns); } static void destroy_pid_namespace_work(struct work_struct *work) { struct pid_namespace *ns = container_of(work, struct pid_namespace, work); do { struct pid_namespace *parent; parent = ns->parent; destroy_pid_namespace(ns); ns = parent; } while (ns != &init_pid_ns && ns_ref_put(ns)); } struct pid_namespace *copy_pid_ns(u64 flags, struct user_namespace *user_ns, struct pid_namespace *old_ns) { if (!(flags & CLONE_NEWPID)) return get_pid_ns(old_ns); if (task_active_pid_ns(current) != old_ns) return ERR_PTR(-EINVAL); return create_pid_namespace(user_ns, old_ns); } void put_pid_ns(struct pid_namespace *ns) { if (ns && ns != &init_pid_ns && ns_ref_put(ns)) schedule_work(&ns->work); } EXPORT_SYMBOL_GPL(put_pid_ns); void zap_pid_ns_processes(struct pid_namespace *pid_ns) { int nr; int rc; struct task_struct *task, *me = current; int init_pids = thread_group_leader(me) ? 1 : 2; struct pid *pid; /* Don't allow any more processes into the pid namespace */ disable_pid_allocation(pid_ns); /* * Ignore SIGCHLD causing any terminated children to autoreap. * This speeds up the namespace shutdown, plus see the comment * below. 
*/ spin_lock_irq(&me->sighand->siglock); me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN; spin_unlock_irq(&me->sighand->siglock); /* * The last thread in the cgroup-init thread group is terminating. * Find the remaining pids in the namespace, signal and wait for them * to exit. * * Note: This signals each thread in the namespace - even those that * belong to the same thread group. To avoid this, we would have * to walk the entire tasklist looking for processes in this * namespace, but that could be unnecessarily expensive if the * pid namespace has just a few processes. Or we would need to * maintain a tasklist for each pid namespace. * */ rcu_read_lock(); read_lock(&tasklist_lock); nr = 2; idr_for_each_entry_continue(&pid_ns->idr, pid, nr) { task = pid_task(pid, PIDTYPE_PID); if (task && !__fatal_signal_pending(task)) group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX); } read_unlock(&tasklist_lock); rcu_read_unlock(); /* * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD. * kernel_wait4() will also block until our children traced from the * parent namespace are detached and become EXIT_DEAD. */ do { clear_thread_flag(TIF_SIGPENDING); clear_thread_flag(TIF_NOTIFY_SIGNAL); rc = kernel_wait4(-1, NULL, __WALL, NULL); } while (rc != -ECHILD); /* * kernel_wait4() misses EXIT_DEAD children, and EXIT_ZOMBIE * processes whose parent processes are outside of the pid * namespace. Such processes are created with setns()+fork(). * * If those EXIT_ZOMBIE processes are not reaped by their * parents before their parents exit, they will be reparented * to pid_ns->child_reaper. Thus pid_ns->child_reaper needs to * stay valid until they all go away. * * The code relies on the pid_ns->child_reaper ignoring * SIGCHLD to cause those EXIT_ZOMBIE processes to be * autoreaped if reparented. * * Semantically it is also desirable to wait for EXIT_ZOMBIE * processes before allowing the child_reaper to be reaped, as * that gives the invariant that when the init process of a * pid namespace is reaped all of the processes in the pid * namespace are gone. * * Once all of the other tasks are gone from the pid_namespace * free_pid() will awaken this task.
*/ for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (pid_ns->pid_allocated == init_pids) break; schedule(); } __set_current_state(TASK_RUNNING); if (pid_ns->reboot) current->signal->group_exit_code = pid_ns->reboot; acct_exit_ns(pid_ns); return; } #ifdef CONFIG_CHECKPOINT_RESTORE static int pid_ns_ctl_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct pid_namespace *pid_ns = task_active_pid_ns(current); struct ctl_table tmp = *table; int ret, next; if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns)) return -EPERM; next = idr_get_cursor(&pid_ns->idr) - 1; tmp.data = &next; tmp.extra2 = &pid_ns->pid_max; ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (!ret && write) idr_set_cursor(&pid_ns->idr, next + 1); return ret; } static const struct ctl_table pid_ns_ctl_table[] = { { .procname = "ns_last_pid", .maxlen = sizeof(int), .mode = 0666, /* permissions are checked in the handler */ .proc_handler = pid_ns_ctl_handler, .extra1 = SYSCTL_ZERO, .extra2 = &init_pid_ns.pid_max, }, }; #endif /* CONFIG_CHECKPOINT_RESTORE */ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) { if (pid_ns == &init_pid_ns) return 0; switch (cmd) { case LINUX_REBOOT_CMD_RESTART2: case LINUX_REBOOT_CMD_RESTART: pid_ns->reboot = SIGHUP; break; case LINUX_REBOOT_CMD_POWER_OFF: case LINUX_REBOOT_CMD_HALT: pid_ns->reboot = SIGINT; break; default: return -EINVAL; } read_lock(&tasklist_lock); send_sig(SIGKILL, pid_ns->child_reaper, 1); read_unlock(&tasklist_lock); do_exit(0); /* Not reached */ return 0; } static struct ns_common *pidns_get(struct task_struct *task) { struct pid_namespace *ns; rcu_read_lock(); ns = task_active_pid_ns(task); if (ns) get_pid_ns(ns); rcu_read_unlock(); return ns ? &ns->ns : NULL; } static struct ns_common *pidns_for_children_get(struct task_struct *task) { struct pid_namespace *ns = NULL; task_lock(task); if (task->nsproxy) { ns = task->nsproxy->pid_ns_for_children; get_pid_ns(ns); } task_unlock(task); if (ns) { read_lock(&tasklist_lock); if (!ns->child_reaper) { put_pid_ns(ns); ns = NULL; } read_unlock(&tasklist_lock); } return ns ? &ns->ns : NULL; } static void pidns_put(struct ns_common *ns) { put_pid_ns(to_pid_ns(ns)); } bool pidns_is_ancestor(struct pid_namespace *child, struct pid_namespace *ancestor) { struct pid_namespace *ns; if (child->level < ancestor->level) return false; for (ns = child; ns->level > ancestor->level; ns = ns->parent) ; return ns == ancestor; } static int pidns_install(struct nsset *nsset, struct ns_common *ns) { struct nsproxy *nsproxy = nsset->nsproxy; struct pid_namespace *active = task_active_pid_ns(current); struct pid_namespace *new = to_pid_ns(ns); if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) || !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN)) return -EPERM; /* * Only allow entering the current active pid namespace * or a child of the current active pid namespace. * * This is required for fork to return a usable pid value and * this maintains the property that processes and their * children can not escape their current pid namespace. 
*/ if (!pidns_is_ancestor(new, active)) return -EINVAL; put_pid_ns(nsproxy->pid_ns_for_children); nsproxy->pid_ns_for_children = get_pid_ns(new); return 0; } static struct ns_common *pidns_get_parent(struct ns_common *ns) { struct pid_namespace *active = task_active_pid_ns(current); struct pid_namespace *pid_ns, *p; /* See if the parent is in the current namespace */ pid_ns = p = to_pid_ns(ns)->parent; for (;;) { if (!p) return ERR_PTR(-EPERM); if (p == active) break; p = p->parent; } return &get_pid_ns(pid_ns)->ns; } static struct user_namespace *pidns_owner(struct ns_common *ns) { return to_pid_ns(ns)->user_ns; } const struct proc_ns_operations pidns_operations = { .name = "pid", .get = pidns_get, .put = pidns_put, .install = pidns_install, .owner = pidns_owner, .get_parent = pidns_get_parent, }; const struct proc_ns_operations pidns_for_children_operations = { .name = "pid_for_children", .real_ns_name = "pid", .get = pidns_for_children_get, .put = pidns_put, .install = pidns_install, .owner = pidns_owner, .get_parent = pidns_get_parent, }; static __init int pid_namespaces_init(void) { pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC | SLAB_ACCOUNT); #ifdef CONFIG_CHECKPOINT_RESTORE register_sysctl_init("kernel", pid_ns_ctl_table); #endif register_pid_ns_sysctl_table_vm(); ns_tree_add(&init_pid_ns); return 0; } __initcall(pid_namespaces_init);
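The pid_ns_for_children indirection that pidns_install() updates is also what makes the classic unshare()+fork() demonstration work: unshare(CLONE_NEWPID) does not move the caller, it only changes the namespace its future children are created in. A userspace sketch (requires CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid;

	/* Affects children only: the caller's own pid namespace is fixed. */
	if (unshare(CLONE_NEWPID) != 0) {
		perror("unshare");
		return 1;
	}
	pid = fork();
	if (pid == 0) {
		/* First task in the new namespace: it becomes the
		 * child_reaper and therefore sees itself as pid 1. */
		printf("child sees pid %d\n", (int)getpid());
		return 0;
	}
	waitpid(pid, NULL, 0);
	return 0;
}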
// SPDX-License-Identifier: GPL-2.0-only /* * MLO link handling * * Copyright (C) 2022-2025 Intel Corporation */ #include <linux/slab.h> #include <linux/kernel.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "key.h" #include "debugfs_netdev.h" static void ieee80211_update_apvlan_links(struct ieee80211_sub_if_data *sdata) { struct ieee80211_sub_if_data *vlan; struct ieee80211_link_data *link; u16 ap_bss_links = sdata->vif.valid_links; u16 new_links, vlan_links; unsigned long add; list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { int link_id; if (!vlan) continue; /* No support for 4addr with MLO yet */ if (vlan->wdev.use_4addr) return; vlan_links = vlan->vif.valid_links; new_links = ap_bss_links; add = new_links & ~vlan_links; if (!add) continue; ieee80211_vif_set_links(vlan, add, 0); for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { link = sdata_dereference(vlan->link[link_id], vlan); ieee80211_link_vlan_copy_chanctx(link); } } } void
ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata) { struct ieee80211_sub_if_data *ap_bss = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); u16 new_links = ap_bss->vif.valid_links; unsigned long add; int link_id; if (!ap_bss->vif.valid_links) return; add = new_links; for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { sdata->wdev.valid_links |= BIT(link_id); ether_addr_copy(sdata->wdev.links[link_id].addr, ap_bss->wdev.links[link_id].addr); } ieee80211_vif_set_links(sdata, new_links, 0); } void ieee80211_apvlan_link_clear(struct ieee80211_sub_if_data *sdata) { if (!sdata->wdev.valid_links) return; sdata->wdev.valid_links = 0; ieee80211_vif_clear_links(sdata); } void ieee80211_link_setup(struct ieee80211_link_data *link) { if (link->sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_mgd_setup_link(link); } void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, int link_id, struct ieee80211_link_data *link, struct ieee80211_bss_conf *link_conf) { bool deflink = link_id < 0; if (link_id < 0) link_id = 0; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { struct ieee80211_sub_if_data *ap_bss; struct ieee80211_bss_conf *ap_bss_conf; ap_bss = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); ap_bss_conf = sdata_dereference(ap_bss->vif.link_conf[link_id], ap_bss); memcpy(link_conf, ap_bss_conf, sizeof(*link_conf)); } link->sdata = sdata; link->link_id = link_id; link->conf = link_conf; link_conf->link_id = link_id; link_conf->vif = &sdata->vif; link->ap_power_level = IEEE80211_UNSET_POWER_LEVEL; link->user_power_level = sdata->local->user_power_level; link_conf->txpower = INT_MIN; wiphy_work_init(&link->csa.finalize_work, ieee80211_csa_finalize_work); wiphy_work_init(&link->color_change_finalize_work, ieee80211_color_change_finalize_work); wiphy_delayed_work_init(&link->color_collision_detect_work, ieee80211_color_collision_detection_work); INIT_LIST_HEAD(&link->assigned_chanctx_list); INIT_LIST_HEAD(&link->reserved_chanctx_list); wiphy_delayed_work_init(&link->dfs_cac_timer_work, ieee80211_dfs_cac_timer_work); if (!deflink) { switch (sdata->vif.type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: ether_addr_copy(link_conf->addr, sdata->wdev.links[link_id].addr); link_conf->bssid = link_conf->addr; WARN_ON(!(sdata->wdev.valid_links & BIT(link_id))); break; case NL80211_IFTYPE_STATION: /* station sets the bssid in ieee80211_mgd_setup_link */ break; default: WARN_ON(1); } ieee80211_link_debugfs_add(link); } rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf); rcu_assign_pointer(sdata->link[link_id], link); } void ieee80211_link_stop(struct ieee80211_link_data *link) { if (link->sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_mgd_stop_link(link); wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, &link->color_collision_detect_work); wiphy_work_cancel(link->sdata->local->hw.wiphy, &link->color_change_finalize_work); wiphy_work_cancel(link->sdata->local->hw.wiphy, &link->csa.finalize_work); if (link->sdata->wdev.links[link->link_id].cac_started) { wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, &link->dfs_cac_timer_work); cfg80211_cac_event(link->sdata->dev, &link->conf->chanreq.oper, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, link->link_id); } ieee80211_link_release_channel(link); } struct link_container { struct ieee80211_link_data data; struct ieee80211_bss_conf conf; }; static void ieee80211_tear_down_links(struct ieee80211_sub_if_data *sdata, struct link_container **links, u16 mask) { struct 
ieee80211_link_data *link; LIST_HEAD(keys); unsigned int link_id; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!(mask & BIT(link_id))) continue; link = &links[link_id]->data; if (link_id == 0 && !link) link = &sdata->deflink; if (WARN_ON(!link)) continue; ieee80211_remove_link_keys(link, &keys); ieee80211_link_debugfs_remove(link); ieee80211_link_stop(link); } synchronize_rcu(); ieee80211_free_key_list(sdata->local, &keys); } static void ieee80211_free_links(struct ieee80211_sub_if_data *sdata, struct link_container **links) { unsigned int link_id; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) kfree(links[link_id]); } static int ieee80211_check_dup_link_addrs(struct ieee80211_sub_if_data *sdata) { unsigned int i, j; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { struct ieee80211_link_data *link1; link1 = sdata_dereference(sdata->link[i], sdata); if (!link1) continue; for (j = i + 1; j < IEEE80211_MLD_MAX_NUM_LINKS; j++) { struct ieee80211_link_data *link2; link2 = sdata_dereference(sdata->link[j], sdata); if (!link2) continue; if (ether_addr_equal(link1->conf->addr, link2->conf->addr)) return -EALREADY; } } return 0; } static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata, u16 valid_links, u16 dormant_links) { sdata->vif.valid_links = valid_links; sdata->vif.dormant_links = dormant_links; if (!valid_links || WARN((~valid_links & dormant_links) || !(valid_links & ~dormant_links), "Invalid links: valid=0x%x, dormant=0x%x", valid_links, dormant_links)) { sdata->vif.active_links = 0; sdata->vif.dormant_links = 0; return; } switch (sdata->vif.type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: /* in an AP all links are always active */ sdata->vif.active_links = valid_links; /* AP links are not expected to be disabled */ WARN_ON(dormant_links); break; case NL80211_IFTYPE_STATION: if (sdata->vif.active_links) break; sdata->vif.active_links = valid_links & ~dormant_links; WARN_ON(hweight16(sdata->vif.active_links) > 1); break; default: WARN_ON(1); } } static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, struct link_container **to_free, u16 new_links, u16 dormant_links) { u16 old_links = sdata->vif.valid_links; u16 old_active = sdata->vif.active_links; unsigned long add = new_links & ~old_links; unsigned long rem = old_links & ~new_links; unsigned int link_id; int ret; struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *link; struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]; struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS]; bool use_deflink = old_links == 0; /* set for error case */ lockdep_assert_wiphy(sdata->local->hw.wiphy); memset(to_free, 0, sizeof(links)); if (old_links == new_links && dormant_links == sdata->vif.dormant_links) return 0; /* if there were no old links, need to clear the pointers to deflink */ if (!old_links) rem |= BIT(0); /* allocate new link structures first */ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) { ret = -ENOMEM; goto free; } links[link_id] = link; } /* keep track of the old pointers for the driver */ BUILD_BUG_ON(sizeof(old) != sizeof(sdata->vif.link_conf)); memcpy(old, sdata->vif.link_conf, sizeof(old)); /* and for us in error cases */ BUILD_BUG_ON(sizeof(old_data) != sizeof(sdata->link)); memcpy(old_data, sdata->link, sizeof(old_data)); /* grab old links to free later */ for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { if 
(rcu_access_pointer(sdata->link[link_id]) != &sdata->deflink) { /* * we must have allocated the data through this path so * we know we can free both at the same time */ to_free[link_id] = container_of(rcu_access_pointer(sdata->link[link_id]), typeof(*links[link_id]), data); } RCU_INIT_POINTER(sdata->link[link_id], NULL); RCU_INIT_POINTER(sdata->vif.link_conf[link_id], NULL); } if (!old_links) ieee80211_debugfs_recreate_netdev(sdata, true); /* link them into data structures */ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { WARN_ON(!use_deflink && rcu_access_pointer(sdata->link[link_id]) == &sdata->deflink); link = links[link_id]; ieee80211_link_init(sdata, link_id, &link->data, &link->conf); ieee80211_link_setup(&link->data); } if (new_links == 0) ieee80211_link_init(sdata, -1, &sdata->deflink, &sdata->vif.bss_conf); ret = ieee80211_check_dup_link_addrs(sdata); if (!ret) { /* for keys we will not be able to undo this */ ieee80211_tear_down_links(sdata, to_free, rem); ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links); /* tell the driver */ if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN) ret = drv_change_vif_links(sdata->local, sdata, old_links & old_active, new_links & sdata->vif.active_links, old); if (!new_links) ieee80211_debugfs_recreate_netdev(sdata, false); if (sdata->vif.type == NL80211_IFTYPE_AP) ieee80211_update_apvlan_links(sdata); } /* * Ignore errors if we are only removing links as removal should * always succeed */ if (!new_links) ret = 0; if (ret) { /* restore config */ memcpy(sdata->link, old_data, sizeof(old_data)); memcpy(sdata->vif.link_conf, old, sizeof(old)); ieee80211_set_vif_links_bitmaps(sdata, old_links, dormant_links); /* and free (only) the newly allocated links */ memset(to_free, 0, sizeof(links)); goto free; } /* use deflink/bss_conf again if and only if there are no more links */ use_deflink = new_links == 0; goto deinit; free: /* if we failed during allocation, only free all */ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { kfree(links[link_id]); links[link_id] = NULL; } deinit: if (use_deflink) ieee80211_link_init(sdata, -1, &sdata->deflink, &sdata->vif.bss_conf); return ret; } int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, u16 new_links, u16 dormant_links) { struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; int ret; ret = ieee80211_vif_update_links(sdata, links, new_links, dormant_links); ieee80211_free_links(sdata, links); return ret; } static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, u16 active_links) { struct ieee80211_bss_conf *link_confs[IEEE80211_MLD_MAX_NUM_LINKS]; struct ieee80211_local *local = sdata->local; u16 old_active = sdata->vif.active_links; unsigned long rem = old_active & ~active_links; unsigned long add = active_links & ~old_active; struct sta_info *sta; unsigned int link_id; int ret, i; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EINVAL; if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) return -EINVAL; /* nothing to do */ if (old_active == active_links) return 0; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) link_confs[i] = sdata_dereference(sdata->vif.link_conf[i], sdata); if (add) { sdata->vif.active_links |= active_links; ret = drv_change_vif_links(local, sdata, old_active, sdata->vif.active_links, link_confs); if (ret) { sdata->vif.active_links = old_active; return ret; } } for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { struct 
ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); ieee80211_teardown_tdls_peers(link); __ieee80211_link_release_channel(link, true); /* * If CSA is (still) active while the link is deactivated, * just schedule the channel switch work for the time we * had previously calculated, and we'll take the process * from there. */ if (link->conf->csa_active) wiphy_delayed_work_queue(local->hw.wiphy, &link->u.mgd.csa.switch_work, link->u.mgd.csa.time - jiffies); } for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); /* * This call really should not fail. Unfortunately, it appears * that this may happen occasionally with some drivers. Should * it happen, we are stuck in a bad place as going backwards is * not really feasible. * * So let's just tell link_use_channel that it must not fail to * assign the channel context (from mac80211's perspective) and * assume the driver is going to trigger a recovery flow if it * had a failure. * That really is not great, nor is it guaranteed to work. But at * least the internal mac80211 state remains consistent and there * is a chance that we can recover. */ ret = _ieee80211_link_use_channel(link, &link->conf->chanreq, IEEE80211_CHANCTX_SHARED, true); WARN_ON_ONCE(ret); /* * inform about the link info changed parameters after all * stations are also added */ } list_for_each_entry(sta, &local->sta_list, list) { if (sdata != sta->sdata) continue; /* this is very temporary, but do it anyway */ __ieee80211_sta_recalc_aggregates(sta, old_active | active_links); ret = drv_change_sta_links(local, sdata, &sta->sta, old_active, old_active | active_links); WARN_ON_ONCE(ret); } ret = ieee80211_key_switch_links(sdata, rem, add); WARN_ON_ONCE(ret); list_for_each_entry(sta, &local->sta_list, list) { if (sdata != sta->sdata) continue; __ieee80211_sta_recalc_aggregates(sta, active_links); ret = drv_change_sta_links(local, sdata, &sta->sta, old_active | active_links, active_links); WARN_ON_ONCE(ret); /* * Do it again, just in case - the driver might very * well have called ieee80211_sta_recalc_aggregates() * from there when filling in the new links, which * would set it wrong since the vif's active links are * not switched yet...
*/ __ieee80211_sta_recalc_aggregates(sta, active_links); } for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); ieee80211_mgd_set_link_qos_params(link); ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT | BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BSSID | BSS_CHANGED_CQM | BSS_CHANGED_QOS | BSS_CHANGED_TXPOWER | BSS_CHANGED_BANDWIDTH | BSS_CHANGED_TWT | BSS_CHANGED_HE_OBSS_PD | BSS_CHANGED_HE_BSS_COLOR); } old_active = sdata->vif.active_links; sdata->vif.active_links = active_links; if (rem) { ret = drv_change_vif_links(local, sdata, old_active, active_links, link_confs); WARN_ON_ONCE(ret); } return 0; } int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; u16 old_active; int ret; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(!active_links)) return -EINVAL; old_active = sdata->vif.active_links; if (old_active == active_links) return 0; if (!drv_can_activate_links(local, sdata, active_links)) return -EINVAL; if (old_active & active_links) { /* * if there's at least one link that stays active across * the change then switch to it (to those) first, and * then enable the additional links */ ret = _ieee80211_set_active_links(sdata, old_active & active_links); if (!ret) ret = _ieee80211_set_active_links(sdata, active_links); } else { /* otherwise switch directly */ ret = _ieee80211_set_active_links(sdata, active_links); } return ret; } EXPORT_SYMBOL_GPL(ieee80211_set_active_links); void ieee80211_set_active_links_async(struct ieee80211_vif *vif, u16 active_links) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (WARN_ON(!active_links)) return; if (!ieee80211_sdata_running(sdata)) return; if (sdata->vif.type != NL80211_IFTYPE_STATION) return; if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) return; /* nothing to do */ if (sdata->vif.active_links == active_links) return; sdata->desired_active_links = active_links; wiphy_work_queue(sdata->local->hw.wiphy, &sdata->activate_links_work); } EXPORT_SYMBOL_GPL(ieee80211_set_active_links_async);
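From a driver's point of view the contract above reduces to one call. A hypothetical driver-side sketch: drv_switch_to_link() is invented for illustration, while the two exported entry points, the valid_links bitmap and the wiphy-lock requirement are as in the code above:

#include <net/mac80211.h>

/* Hypothetical helper: collapse an MLD vif down to one active link. */
static int drv_switch_to_link(struct ieee80211_vif *vif, unsigned int link_id)
{
	if (!(vif->valid_links & BIT(link_id)))
		return -EINVAL;
	/*
	 * Synchronous variant: the caller must hold the wiphy mutex
	 * (ieee80211_set_active_links() asserts it). From atomic or
	 * unlocked contexts, ieee80211_set_active_links_async() defers
	 * the switch to activate_links_work instead.
	 */
	return ieee80211_set_active_links(vif, BIT(link_id));
}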
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
 *                         Patrick Schaaf <bof@bof.de>
 * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
 */

/* Kernel module for IP set management */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>

static LIST_HEAD(ip_set_type_list);		/* all registered set types */
static DEFINE_MUTEX(ip_set_type_mutex);		/* protects ip_set_type_list */
static DEFINE_RWLOCK(ip_set_ref_lock);		/* protects the set refs */

struct ip_set_net {
	struct ip_set * __rcu *ip_set_list;	/* all individual sets */
	ip_set_id_t	ip_set_max;	/* max number of sets */
	bool		is_deleted;	/* deleted by ip_set_net_exit */
	bool		is_destroyed;	/* all sets are destroyed */
};

static unsigned int ip_set_net_id __read_mostly;

static struct ip_set_net *ip_set_pernet(struct net *net)
{
	return net_generic(net, ip_set_net_id);
}

#define IP_SET_INC	64
#define STRNCMP(a, b)	(strncmp(a, b, IPSET_MAXNAMELEN) == 0)

static unsigned int max_sets;

module_param(max_sets, int, 0600);
MODULE_PARM_DESC(max_sets, "maximal number of sets");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);

/* When the nfnl mutex or ip_set_ref_lock is held: */
#define ip_set_dereference(inst)	\
	rcu_dereference_protected((inst)->ip_set_list,	\
		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
		lockdep_is_held(&ip_set_ref_lock) || \
		(inst)->is_deleted)
#define ip_set(inst, id)		\
	ip_set_dereference(inst)[id]
#define ip_set_ref_netlink(inst, id)	\
rcu_dereference_raw((inst)->ip_set_list)[id] #define ip_set_dereference_nfnl(p) \ rcu_dereference_check(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET)) /* The set types are implemented in modules and registered set types * can be found in ip_set_type_list. Adding/deleting types is * serialized by ip_set_type_mutex. */ static void ip_set_type_lock(void) { mutex_lock(&ip_set_type_mutex); } static void ip_set_type_unlock(void) { mutex_unlock(&ip_set_type_mutex); } /* Register and deregister settype */ static struct ip_set_type * find_set_type(const char *name, u8 family, u8 revision) { struct ip_set_type *type; list_for_each_entry_rcu(type, &ip_set_type_list, list, lockdep_is_held(&ip_set_type_mutex)) if (STRNCMP(type->name, name) && (type->family == family || type->family == NFPROTO_UNSPEC) && revision >= type->revision_min && revision <= type->revision_max) return type; return NULL; } /* Unlock, try to load a set type module and lock again */ static bool load_settype(const char *name) { if (!try_module_get(THIS_MODULE)) return false; nfnl_unlock(NFNL_SUBSYS_IPSET); pr_debug("try to load ip_set_%s\n", name); if (request_module("ip_set_%s", name) < 0) { pr_warn("Can't find ip_set type %s\n", name); nfnl_lock(NFNL_SUBSYS_IPSET); module_put(THIS_MODULE); return false; } nfnl_lock(NFNL_SUBSYS_IPSET); module_put(THIS_MODULE); return true; } /* Find a set type and reference it */ #define find_set_type_get(name, family, revision, found) \ __find_set_type_get(name, family, revision, found, false) static int __find_set_type_get(const char *name, u8 family, u8 revision, struct ip_set_type **found, bool retry) { struct ip_set_type *type; int err; if (retry && !load_settype(name)) return -IPSET_ERR_FIND_TYPE; rcu_read_lock(); *found = find_set_type(name, family, revision); if (*found) { err = !try_module_get((*found)->me) ? -EFAULT : 0; goto unlock; } /* Make sure the type is already loaded * but we don't support the revision */ list_for_each_entry_rcu(type, &ip_set_type_list, list) if (STRNCMP(type->name, name)) { err = -IPSET_ERR_FIND_TYPE; goto unlock; } rcu_read_unlock(); return retry ? -IPSET_ERR_FIND_TYPE : __find_set_type_get(name, family, revision, found, true); unlock: rcu_read_unlock(); return err; } /* Find a given set type by name and family. * If we succeeded, the supported minimal and maximum revisions are * filled out. */ #define find_set_type_minmax(name, family, min, max) \ __find_set_type_minmax(name, family, min, max, false) static int __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max, bool retry) { struct ip_set_type *type; bool found = false; if (retry && !load_settype(name)) return -IPSET_ERR_FIND_TYPE; *min = 255; *max = 0; rcu_read_lock(); list_for_each_entry_rcu(type, &ip_set_type_list, list) if (STRNCMP(type->name, name) && (type->family == family || type->family == NFPROTO_UNSPEC)) { found = true; if (type->revision_min < *min) *min = type->revision_min; if (type->revision_max > *max) *max = type->revision_max; } rcu_read_unlock(); if (found) return 0; return retry ? -IPSET_ERR_FIND_TYPE : __find_set_type_minmax(name, family, min, max, true); } #define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \ (f) == NFPROTO_IPV6 ? "inet6" : "any") /* Register a set type structure. The type is identified by * the unique triple of name, family and revision. 
 */
int
ip_set_type_register(struct ip_set_type *type)
{
	int ret = 0;

	if (type->protocol != IPSET_PROTOCOL) {
		pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n",
			type->name, family_name(type->family),
			type->revision_min, type->revision_max,
			type->protocol, IPSET_PROTOCOL);
		return -EINVAL;
	}

	ip_set_type_lock();
	if (find_set_type(type->name, type->family, type->revision_min)) {
		/* Duplicate! */
		pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
			type->name, family_name(type->family),
			type->revision_min);
		ip_set_type_unlock();
		return -EINVAL;
	}
	list_add_rcu(&type->list, &ip_set_type_list);
	pr_debug("type %s, family %s, revision %u:%u registered.\n",
		 type->name, family_name(type->family),
		 type->revision_min, type->revision_max);
	ip_set_type_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(ip_set_type_register);

/* Unregister a set type. There's a small race with ip_set_create */
void
ip_set_type_unregister(struct ip_set_type *type)
{
	ip_set_type_lock();
	if (!find_set_type(type->name, type->family, type->revision_min)) {
		pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
			type->name, family_name(type->family),
			type->revision_min);
		ip_set_type_unlock();
		return;
	}
	list_del_rcu(&type->list);
	pr_debug("type %s, family %s with revision min %u unregistered.\n",
		 type->name, family_name(type->family), type->revision_min);
	ip_set_type_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ip_set_type_unregister);

/* Utility functions */
void *
ip_set_alloc(size_t size)
{
	return kvzalloc(size, GFP_KERNEL_ACCOUNT);
}
EXPORT_SYMBOL_GPL(ip_set_alloc);

void
ip_set_free(void *members)
{
	pr_debug("%p: free with %s\n", members,
		 is_vmalloc_addr(members) ? "vfree" : "kfree");
	kvfree(members);
}
EXPORT_SYMBOL_GPL(ip_set_free);

static bool
flag_nested(const struct nlattr *nla)
{
	return nla->nla_type & NLA_F_NESTED;
}

static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
	[IPSET_ATTR_IPADDR_IPV4]	= { .type = NLA_U32 },
	[IPSET_ATTR_IPADDR_IPV6]	= NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
};

int
ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
{
	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];

	if (unlikely(!flag_nested(nla)))
		return -IPSET_ERR_PROTOCOL;
	if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
			     ipaddr_policy, NULL))
		return -IPSET_ERR_PROTOCOL;
	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
		return -IPSET_ERR_PROTOCOL;

	*ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);

int
ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
{
	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];

	if (unlikely(!flag_nested(nla)))
		return -IPSET_ERR_PROTOCOL;
	if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
			     ipaddr_policy, NULL))
		return -IPSET_ERR_PROTOCOL;
	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
		return -IPSET_ERR_PROTOCOL;

	memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
	       sizeof(struct in6_addr));
	return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);

static u32
ip_set_timeout_get(const unsigned long *timeout)
{
	u32 t;

	if (*timeout == IPSET_ELEM_PERMANENT)
		return 0;

	t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC;
	/* Zero value in userspace means no timeout */
	return t == 0 ? 1 : t;
}

static char *
ip_set_comment_uget(struct nlattr *tb)
{
	return nla_data(tb);
}
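/*
 * Illustration (not part of the original source): timeouts are stored as
 * absolute jiffies deadlines and converted back to seconds for userspace
 * by ip_set_timeout_get() above. A minimal sketch of the round trip,
 * assuming a 3600 s timeout requested from userspace:
 *
 *	unsigned long deadline = jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC);
 *	u32 left = ip_set_timeout_get(&deadline);	// ~3600, never 0
 *
 * A still-valid element never reports 0 remaining seconds, because 0 is
 * reserved to mean "no timeout" (IPSET_ELEM_PERMANENT) in the netlink dump.
 */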
/* Called from uadd only, protected by the set spinlock.
 * The kadt functions don't use the comment extensions in any way.
 */
void
ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
		    const struct ip_set_ext *ext)
{
	struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
	size_t len = ext->comment ? strlen(ext->comment) : 0;

	if (unlikely(c)) {
		set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
		kfree_rcu(c, rcu);
		rcu_assign_pointer(comment->c, NULL);
	}
	if (!len)
		return;
	if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
		len = IPSET_MAX_COMMENT_SIZE;
	c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
	if (unlikely(!c))
		return;
	strscpy(c->str, ext->comment, len + 1);
	set->ext_size += sizeof(*c) + strlen(c->str) + 1;
	rcu_assign_pointer(comment->c, c);
}
EXPORT_SYMBOL_GPL(ip_set_init_comment);

/* Used only when dumping a set, protected by rcu_read_lock() */
static int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
	struct ip_set_comment_rcu *c = rcu_dereference(comment->c);

	if (!c)
		return 0;
	return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
}

/* Called from uadd/udel, flush or the garbage collectors protected
 * by the set spinlock.
 * Called when the set is destroyed and when there can't be any user
 * of the set data anymore.
 */
static void
ip_set_comment_free(struct ip_set *set, void *ptr)
{
	struct ip_set_comment *comment = ptr;
	struct ip_set_comment_rcu *c;

	c = rcu_dereference_protected(comment->c, 1);
	if (unlikely(!c))
		return;
	set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
	kfree_rcu(c, rcu);
	rcu_assign_pointer(comment->c, NULL);
}

typedef void (*destroyer)(struct ip_set *, void *);

/* ipset data extension types, in size order */
const struct ip_set_ext_type ip_set_extensions[] = {
	[IPSET_EXT_ID_COUNTER] = {
		.type	= IPSET_EXT_COUNTER,
		.flag	= IPSET_FLAG_WITH_COUNTERS,
		.len	= sizeof(struct ip_set_counter),
		.align	= __alignof__(struct ip_set_counter),
	},
	[IPSET_EXT_ID_TIMEOUT] = {
		.type	= IPSET_EXT_TIMEOUT,
		.len	= sizeof(unsigned long),
		.align	= __alignof__(unsigned long),
	},
	[IPSET_EXT_ID_SKBINFO] = {
		.type	= IPSET_EXT_SKBINFO,
		.flag	= IPSET_FLAG_WITH_SKBINFO,
		.len	= sizeof(struct ip_set_skbinfo),
		.align	= __alignof__(struct ip_set_skbinfo),
	},
	[IPSET_EXT_ID_COMMENT] = {
		.type	 = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
		.flag	 = IPSET_FLAG_WITH_COMMENT,
		.len	 = sizeof(struct ip_set_comment),
		.align	 = __alignof__(struct ip_set_comment),
		.destroy = ip_set_comment_free,
	},
};
EXPORT_SYMBOL_GPL(ip_set_extensions);
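/*
 * Illustration (not part of the original source): ip_set_elem_len() below
 * appends the requested extensions after the type-specific element data,
 * each one aligned to its own alignment requirement. A minimal sketch,
 * assuming a hypothetical 8-byte element with counters and timeout enabled
 * on a 64-bit machine (sizeof(unsigned long) == 8,
 * sizeof(struct ip_set_counter) == 16):
 *
 *	offset 0:  element data		(len = 8)
 *	offset 8:  counter extension	(len = 16, align = 8)
 *	offset 24: timeout extension	(len = 8,  align = 8)
 *	total:     32 bytes, so the final ALIGN() is a no-op here
 *
 * set->offset[id] records where each extension starts, which is how
 * ext_timeout() and friends locate them later with pointer arithmetic.
 */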
static bool
add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
{
	return ip_set_extensions[id].flag ?
		(flags & ip_set_extensions[id].flag) :
		!!tb[IPSET_ATTR_TIMEOUT];
}

size_t
ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
		size_t align)
{
	enum ip_set_ext_id id;
	u32 cadt_flags = 0;

	if (tb[IPSET_ATTR_CADT_FLAGS])
		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
	if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
		set->flags |= IPSET_CREATE_FLAG_FORCEADD;
	if (!align)
		align = 1;
	for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
		if (!add_extension(id, cadt_flags, tb))
			continue;
		if (align < ip_set_extensions[id].align)
			align = ip_set_extensions[id].align;
		len = ALIGN(len, ip_set_extensions[id].align);
		set->offset[id] = len;
		set->extensions |= ip_set_extensions[id].type;
		len += ip_set_extensions[id].len;
	}
	return ALIGN(len, align);
}
EXPORT_SYMBOL_GPL(ip_set_elem_len);

int
ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
		      struct ip_set_ext *ext)
{
	u64 fullmark;

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!SET_WITH_TIMEOUT(set))
			return -IPSET_ERR_TIMEOUT;
		ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}
	if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
		if (!SET_WITH_COUNTER(set))
			return -IPSET_ERR_COUNTER;
		if (tb[IPSET_ATTR_BYTES])
			ext->bytes = be64_to_cpu(nla_get_be64(
						 tb[IPSET_ATTR_BYTES]));
		if (tb[IPSET_ATTR_PACKETS])
			ext->packets = be64_to_cpu(nla_get_be64(
						   tb[IPSET_ATTR_PACKETS]));
	}
	if (tb[IPSET_ATTR_COMMENT]) {
		if (!SET_WITH_COMMENT(set))
			return -IPSET_ERR_COMMENT;
		ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
	}
	if (tb[IPSET_ATTR_SKBMARK]) {
		if (!SET_WITH_SKBINFO(set))
			return -IPSET_ERR_SKBINFO;
		fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
		ext->skbinfo.skbmark = fullmark >> 32;
		ext->skbinfo.skbmarkmask = fullmark & 0xffffffff;
	}
	if (tb[IPSET_ATTR_SKBPRIO]) {
		if (!SET_WITH_SKBINFO(set))
			return -IPSET_ERR_SKBINFO;
		ext->skbinfo.skbprio =
			be32_to_cpu(nla_get_be32(tb[IPSET_ATTR_SKBPRIO]));
	}
	if (tb[IPSET_ATTR_SKBQUEUE]) {
		if (!SET_WITH_SKBINFO(set))
			return -IPSET_ERR_SKBINFO;
		ext->skbinfo.skbqueue =
			be16_to_cpu(nla_get_be16(tb[IPSET_ATTR_SKBQUEUE]));
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_extensions);

static u64
ip_set_get_bytes(const struct ip_set_counter *counter)
{
	return (u64)atomic64_read(&(counter)->bytes);
}

static u64
ip_set_get_packets(const struct ip_set_counter *counter)
{
	return (u64)atomic64_read(&(counter)->packets);
}

static bool
ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
{
	return nla_put_net64(skb, IPSET_ATTR_BYTES,
			     cpu_to_be64(ip_set_get_bytes(counter)),
			     IPSET_ATTR_PAD) ||
	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
			     cpu_to_be64(ip_set_get_packets(counter)),
			     IPSET_ATTR_PAD);
}

static bool
ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
{
	/* Send nonzero parameters only */
	return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
					  skbinfo->skbmarkmask),
			      IPSET_ATTR_PAD)) ||
	       (skbinfo->skbprio &&
		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
			      cpu_to_be32(skbinfo->skbprio))) ||
	       (skbinfo->skbqueue &&
		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
			      cpu_to_be16(skbinfo->skbqueue)));
}

int
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
const void *e, bool active) { if (SET_WITH_TIMEOUT(set)) { unsigned long *timeout = ext_timeout(e, set); if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(active ? ip_set_timeout_get(timeout) : *timeout))) return -EMSGSIZE; } if (SET_WITH_COUNTER(set) && ip_set_put_counter(skb, ext_counter(e, set))) return -EMSGSIZE; if (SET_WITH_COMMENT(set) && ip_set_put_comment(skb, ext_comment(e, set))) return -EMSGSIZE; if (SET_WITH_SKBINFO(set) && ip_set_put_skbinfo(skb, ext_skbinfo(e, set))) return -EMSGSIZE; return 0; } EXPORT_SYMBOL_GPL(ip_set_put_extensions); static bool ip_set_match_counter(u64 counter, u64 match, u8 op) { switch (op) { case IPSET_COUNTER_NONE: return true; case IPSET_COUNTER_EQ: return counter == match; case IPSET_COUNTER_NE: return counter != match; case IPSET_COUNTER_LT: return counter < match; case IPSET_COUNTER_GT: return counter > match; } return false; } static void ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) { atomic64_add((long long)bytes, &(counter)->bytes); } static void ip_set_add_packets(u64 packets, struct ip_set_counter *counter) { atomic64_add((long long)packets, &(counter)->packets); } static void ip_set_update_counter(struct ip_set_counter *counter, const struct ip_set_ext *ext, u32 flags) { if (ext->packets != ULLONG_MAX && !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { ip_set_add_bytes(ext->bytes, counter); ip_set_add_packets(ext->packets, counter); } } static void ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags) { mext->skbinfo = *skbinfo; } bool ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags, void *data) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(data, set))) return false; if (SET_WITH_COUNTER(set)) { struct ip_set_counter *counter = ext_counter(data, set); ip_set_update_counter(counter, ext, flags); if (flags & IPSET_FLAG_MATCH_COUNTERS && !(ip_set_match_counter(ip_set_get_packets(counter), mext->packets, mext->packets_op) && ip_set_match_counter(ip_set_get_bytes(counter), mext->bytes, mext->bytes_op))) return false; } if (SET_WITH_SKBINFO(set)) ip_set_get_skbinfo(ext_skbinfo(data, set), ext, mext, flags); return true; } EXPORT_SYMBOL_GPL(ip_set_match_extensions); /* Creating/destroying/renaming/swapping affect the existence and * the properties of a set. All of these can be executed from userspace * only and serialized by the nfnl mutex indirectly from nfnetlink. * * Sets are identified by their index in ip_set_list and the index * is used by the external references (set/SET netfilter modules). * * The set behind an index may change by swapping only, from userspace. */ static void __ip_set_get(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); set->ref++; write_unlock_bh(&ip_set_ref_lock); } static void __ip_set_put(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); BUG_ON(set->ref == 0); set->ref--; write_unlock_bh(&ip_set_ref_lock); } /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need * a separate reference counter */ static void __ip_set_get_netlink(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); set->ref_netlink++; write_unlock_bh(&ip_set_ref_lock); } static void __ip_set_put_netlink(struct ip_set *set) { write_lock_bh(&ip_set_ref_lock); BUG_ON(set->ref_netlink == 0); set->ref_netlink--; write_unlock_bh(&ip_set_ref_lock); } /* Add, del and test set entries from kernel. 
 *
 * The set behind the index must exist and must be referenced
 * so it can't be destroyed (or changed) under our feet.
 */
static struct ip_set *
ip_set_rcu_get(struct net *net, ip_set_id_t index)
{
	struct ip_set_net *inst = ip_set_pernet(net);

	/* ip_set_list and the set pointer need to be protected */
	return ip_set_dereference_nfnl(inst->ip_set_list)[index];
}

static inline void
ip_set_lock(struct ip_set *set)
{
	if (!set->variant->region_lock)
		spin_lock_bh(&set->lock);
}

static inline void
ip_set_unlock(struct ip_set *set)
{
	if (!set->variant->region_lock)
		spin_unlock_bh(&set->lock);
}

int
ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
	    const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
	struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
	int ret = 0;

	BUG_ON(!set);
	pr_debug("set %s, index %u\n", set->name, index);

	if (opt->dim < set->type->dimension ||
	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
		return 0;

	ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);

	if (ret == -EAGAIN) {
		/* Type requests element to be completed */
		pr_debug("element must be completed, ADD is triggered\n");
		ip_set_lock(set);
		set->variant->kadt(set, skb, par, IPSET_ADD, opt);
		ip_set_unlock(set);
		ret = 1;
	} else {
		/* --return-nomatch: invert matched element */
		if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) &&
		    (set->type->features & IPSET_TYPE_NOMATCH) &&
		    (ret > 0 || ret == -ENOTEMPTY))
			ret = -ret;
	}

	/* Convert error codes to nomatch */
	return (ret < 0 ? 0 : ret);
}
EXPORT_SYMBOL_GPL(ip_set_test);

int
ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
	   const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
	struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
	int ret;

	BUG_ON(!set);
	pr_debug("set %s, index %u\n", set->name, index);

	if (opt->dim < set->type->dimension ||
	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
		return -IPSET_ERR_TYPE_MISMATCH;

	ip_set_lock(set);
	ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
	ip_set_unlock(set);

	return ret;
}
EXPORT_SYMBOL_GPL(ip_set_add);

int
ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
	   const struct xt_action_param *par, struct ip_set_adt_opt *opt)
{
	struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
	int ret = 0;

	BUG_ON(!set);
	pr_debug("set %s, index %u\n", set->name, index);

	if (opt->dim < set->type->dimension ||
	    !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
		return -IPSET_ERR_TYPE_MISMATCH;

	ip_set_lock(set);
	ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
	ip_set_unlock(set);

	return ret;
}
EXPORT_SYMBOL_GPL(ip_set_del);

/* Find set by name, reference it once. The reference makes sure the
 * thing pointed to does not go away under our feet.
 */
ip_set_id_t
ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
{
	ip_set_id_t i, index = IPSET_INVALID_ID;
	struct ip_set *s;
	struct ip_set_net *inst = ip_set_pernet(net);

	rcu_read_lock();
	for (i = 0; i < inst->ip_set_max; i++) {
		s = rcu_dereference(inst->ip_set_list)[i];
		if (s && STRNCMP(s->name, name)) {
			__ip_set_get(s);
			index = i;
			*set = s;
			break;
		}
	}
	rcu_read_unlock();

	return index;
}
EXPORT_SYMBOL_GPL(ip_set_get_byname);
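/*
 * Illustration (not part of the original source): a sketch of how a
 * kernel-side user may pin a set at setup time and query it on the packet
 * path (xt_set itself uses the ip_set_nfnl_* variants further below).
 * "myset" and the surrounding stages are hypothetical:
 *
 *	struct ip_set *set;
 *	ip_set_id_t index = ip_set_get_byname(net, "myset", &set);
 *
 *	if (index == IPSET_INVALID_ID)
 *		return -ENOENT;
 *	// per packet, from a netfilter hook:
 *	if (ip_set_test(index, skb, par, &opt) > 0)
 *		...;	// matched
 *	// on teardown:
 *	ip_set_put_byindex(net, index);
 *
 * The reference taken up front guarantees the index remains valid on the
 * packet path, even though the set behind it may be replaced by swapping.
 */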
/* If the given set pointer points to a valid set, decrement
 * reference count by 1. The caller shall not assume the index
 * to be valid after calling this function.
 */
static void
__ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
{
	struct ip_set *set;

	rcu_read_lock();
	set = rcu_dereference(inst->ip_set_list)[index];
	if (set)
		__ip_set_put(set);
	rcu_read_unlock();
}

void
ip_set_put_byindex(struct net *net, ip_set_id_t index)
{
	struct ip_set_net *inst = ip_set_pernet(net);

	__ip_set_put_byindex(inst, index);
}
EXPORT_SYMBOL_GPL(ip_set_put_byindex);

/* Get the name of a set behind a set index.
 * Set itself is protected by RCU, but its name isn't: to protect against
 * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy
 * the name.
 */
void
ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
{
	struct ip_set *set = ip_set_rcu_get(net, index);

	BUG_ON(!set);

	read_lock_bh(&ip_set_ref_lock);
	strscpy_pad(name, set->name, IPSET_MAXNAMELEN);
	read_unlock_bh(&ip_set_ref_lock);
}
EXPORT_SYMBOL_GPL(ip_set_name_byindex);

/* Routines to call by external subsystems, which do not
 * call nfnl_lock for us.
 */

/* Find set by index, reference it once. The reference makes sure the
 * thing pointed to does not go away under our feet.
 *
 * The nfnl mutex is used in the function.
 */
ip_set_id_t
ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
{
	struct ip_set *set;
	struct ip_set_net *inst = ip_set_pernet(net);

	if (index >= inst->ip_set_max)
		return IPSET_INVALID_ID;

	nfnl_lock(NFNL_SUBSYS_IPSET);
	set = ip_set(inst, index);
	if (set)
		__ip_set_get(set);
	else
		index = IPSET_INVALID_ID;
	nfnl_unlock(NFNL_SUBSYS_IPSET);

	return index;
}
EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);

/* If the given set pointer points to a valid set, decrement
 * reference count by 1. The caller shall not assume the index
 * to be valid after calling this function.
 *
 * The nfnl mutex is used in the function.
 */
void
ip_set_nfnl_put(struct net *net, ip_set_id_t index)
{
	struct ip_set *set;
	struct ip_set_net *inst = ip_set_pernet(net);

	nfnl_lock(NFNL_SUBSYS_IPSET);
	if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
		set = ip_set(inst, index);
		if (set)
			__ip_set_put(set);
	}
	nfnl_unlock(NFNL_SUBSYS_IPSET);
}
EXPORT_SYMBOL_GPL(ip_set_nfnl_put);

/* Communication protocol with userspace over netlink.
 *
 * The commands are serialized by the nfnl mutex.
 */

static inline u8 protocol(const struct nlattr * const tb[])
{
	return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]);
}

static inline bool
protocol_failed(const struct nlattr * const tb[])
{
	return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL;
}

static inline bool
protocol_min_failed(const struct nlattr * const tb[])
{
	return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN;
}

static inline u32
flag_exist(const struct nlmsghdr *nlh)
{
	return nlh->nlmsg_flags & NLM_F_EXCL ?
0 : IPSET_FLAG_EXIST; } static struct nlmsghdr * start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags, enum ipset_cmd cmd) { return nfnl_msg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd), flags, NFPROTO_IPV4, NFNETLINK_V0, 0); } /* Create a set */ static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1}, [IPSET_ATTR_REVISION] = { .type = NLA_U8 }, [IPSET_ATTR_FAMILY] = { .type = NLA_U8 }, [IPSET_ATTR_DATA] = { .type = NLA_NESTED }, }; static struct ip_set * find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id) { struct ip_set *set = NULL; ip_set_id_t i; *id = IPSET_INVALID_ID; for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set && STRNCMP(set->name, name)) { *id = i; break; } } return (*id == IPSET_INVALID_ID ? NULL : set); } static inline struct ip_set * find_set(struct ip_set_net *inst, const char *name) { ip_set_id_t id; return find_set_and_id(inst, name, &id); } static int find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index, struct ip_set **set) { struct ip_set *s; ip_set_id_t i; *index = IPSET_INVALID_ID; for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (!s) { if (*index == IPSET_INVALID_ID) *index = i; } else if (STRNCMP(name, s->name)) { /* Name clash */ *set = s; return -EEXIST; } } if (*index == IPSET_INVALID_ID) /* No free slot remained */ return -IPSET_ERR_MAX_SETS; return 0; } static int ip_set_none(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return -EOPNOTSUPP; } static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set, *clash = NULL; ip_set_id_t index = IPSET_INVALID_ID; struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {}; const char *name, *typename; u8 family, revision; u32 flags = flag_exist(info->nlh); int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_TYPENAME] || !attr[IPSET_ATTR_REVISION] || !attr[IPSET_ATTR_FAMILY] || (attr[IPSET_ATTR_DATA] && !flag_nested(attr[IPSET_ATTR_DATA])))) return -IPSET_ERR_PROTOCOL; name = nla_data(attr[IPSET_ATTR_SETNAME]); typename = nla_data(attr[IPSET_ATTR_TYPENAME]); family = nla_get_u8(attr[IPSET_ATTR_FAMILY]); revision = nla_get_u8(attr[IPSET_ATTR_REVISION]); pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n", name, typename, family_name(family), revision); /* First, and without any locks, allocate and initialize * a normal base set structure. */ set = kzalloc(sizeof(*set), GFP_KERNEL); if (!set) return -ENOMEM; spin_lock_init(&set->lock); strscpy(set->name, name, IPSET_MAXNAMELEN); set->family = family; set->revision = revision; /* Next, check that we know the type, and take * a reference on the type, to make sure it stays available * while constructing our new set. * * After referencing the type, we try to create the type * specific part of the set without holding any locks. */ ret = find_set_type_get(typename, family, revision, &set->type); if (ret) goto out; /* Without holding any locks, create private part. 
*/ if (attr[IPSET_ATTR_DATA] && nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], set->type->create_policy, NULL)) { ret = -IPSET_ERR_PROTOCOL; goto put_out; } /* Set create flags depending on the type revision */ set->flags |= set->type->create_flags[revision]; ret = set->type->create(info->net, set, tb, flags); if (ret != 0) goto put_out; /* BTW, ret==0 here. */ /* Here, we have a valid, constructed set and we are protected * by the nfnl mutex. Find the first free index in ip_set_list * and check clashing. */ ret = find_free_id(inst, set->name, &index, &clash); if (ret == -EEXIST) { /* If this is the same set and requested, ignore error */ if ((flags & IPSET_FLAG_EXIST) && STRNCMP(set->type->name, clash->type->name) && set->type->family == clash->type->family && set->type->revision_min == clash->type->revision_min && set->type->revision_max == clash->type->revision_max && set->variant->same_set(set, clash)) ret = 0; goto cleanup; } else if (ret == -IPSET_ERR_MAX_SETS) { struct ip_set **list, **tmp; ip_set_id_t i = inst->ip_set_max + IP_SET_INC; if (i < inst->ip_set_max || i == IPSET_INVALID_ID) /* Wraparound */ goto cleanup; list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL); if (!list) goto cleanup; /* nfnl mutex is held, both lists are valid */ tmp = ip_set_dereference(inst); memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max); rcu_assign_pointer(inst->ip_set_list, list); /* Make sure all current packets have passed through */ synchronize_net(); /* Use new list */ index = inst->ip_set_max; inst->ip_set_max = i; kvfree(tmp); ret = 0; } else if (ret) { goto cleanup; } /* Finally! Add our shiny new set to the list, and be done. */ pr_debug("create: '%s' created with index %u!\n", set->name, index); ip_set(inst, index) = set; return ret; cleanup: set->variant->cancel_gc(set); set->variant->destroy(set); put_out: module_put(set->type->me); out: kfree(set); return ret; } /* Destroy sets */ static const struct nla_policy ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, }; /* In order to return quickly when destroying a single set, it is split * into two stages: * - Cancel garbage collector * - Destroy the set itself via call_rcu() */ static void ip_set_destroy_set_rcu(struct rcu_head *head) { struct ip_set *set = container_of(head, struct ip_set, rcu); set->variant->destroy(set); module_put(set->type->me); kfree(set); } static void _destroy_all_sets(struct ip_set_net *inst) { struct ip_set *set; ip_set_id_t i; bool need_wait = false; /* First cancel gc's: set:list sets are flushed as well */ for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set) { set->variant->cancel_gc(set); if (set->type->features & IPSET_TYPE_NAME) need_wait = true; } } /* Must wait for flush to be really finished */ if (need_wait) rcu_barrier(); for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set) { ip_set(inst, i) = NULL; set->variant->destroy(set); module_put(set->type->me); kfree(set); } } } static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *s; ip_set_id_t i; int ret = 0; if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; /* Commands are serialized and references are * protected by the ip_set_ref_lock. * External systems (i.e. 
xt_set) must call * ip_set_nfnl_get_* functions, that way we * can safely check references here. * * list:set timer can only decrement the reference * counter, so if it's already zero, we can proceed * without holding the lock. */ if (!attr[IPSET_ATTR_SETNAME]) { read_lock_bh(&ip_set_ref_lock); for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s && (s->ref || s->ref_netlink)) { ret = -IPSET_ERR_BUSY; goto out; } } inst->is_destroyed = true; read_unlock_bh(&ip_set_ref_lock); _destroy_all_sets(inst); /* Modified by ip_set_destroy() only, which is serialized */ inst->is_destroyed = false; } else { u32 flags = flag_exist(info->nlh); u16 features = 0; read_lock_bh(&ip_set_ref_lock); s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &i); if (!s) { if (!(flags & IPSET_FLAG_EXIST)) ret = -ENOENT; goto out; } else if (s->ref || s->ref_netlink) { ret = -IPSET_ERR_BUSY; goto out; } features = s->type->features; ip_set(inst, i) = NULL; read_unlock_bh(&ip_set_ref_lock); /* Must cancel garbage collectors */ s->variant->cancel_gc(s); if (features & IPSET_TYPE_NAME) { /* Must wait for flush to be really finished */ rcu_barrier(); } call_rcu(&s->rcu, ip_set_destroy_set_rcu); } return 0; out: read_unlock_bh(&ip_set_ref_lock); return ret; } /* Flush sets */ static void ip_set_flush_set(struct ip_set *set) { pr_debug("set: %s\n", set->name); ip_set_lock(set); set->variant->flush(set); ip_set_unlock(set); } static int ip_set_flush(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *s; ip_set_id_t i; if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; if (!attr[IPSET_ATTR_SETNAME]) { for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s) ip_set_flush_set(s); } } else { s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!s) return -ENOENT; ip_set_flush_set(s); } return 0; } /* Rename a set */ static const struct nla_policy ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, }; static int ip_set_rename(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set, *s; const char *name2; ip_set_id_t i; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_SETNAME2])) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; write_lock_bh(&ip_set_ref_lock); if (set->ref != 0 || set->ref_netlink != 0) { ret = -IPSET_ERR_REFERENCED; goto out; } name2 = nla_data(attr[IPSET_ATTR_SETNAME2]); for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); if (s && STRNCMP(s->name, name2)) { ret = -IPSET_ERR_EXIST_SETNAME2; goto out; } } strscpy_pad(set->name, name2, IPSET_MAXNAMELEN); out: write_unlock_bh(&ip_set_ref_lock); return ret; } /* Swap two sets so that name/index points to the other. * References and set names are also swapped. * * The commands are serialized by the nfnl mutex and references are * protected by the ip_set_ref_lock. The kernel interfaces * do not hold the mutex but the pointer settings are atomic * so the ip_set_list always contains valid pointers to the sets. 
 */
static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info,
		       const struct nlattr * const attr[])
{
	struct ip_set_net *inst = ip_set_pernet(info->net);
	struct ip_set *from, *to;
	ip_set_id_t from_id, to_id;
	char from_name[IPSET_MAXNAMELEN];

	if (unlikely(protocol_min_failed(attr) ||
		     !attr[IPSET_ATTR_SETNAME] ||
		     !attr[IPSET_ATTR_SETNAME2]))
		return -IPSET_ERR_PROTOCOL;

	from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
			       &from_id);
	if (!from)
		return -ENOENT;

	to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
			     &to_id);
	if (!to)
		return -IPSET_ERR_EXIST_SETNAME2;

	/* Features must not change.
	 * Not an artificial restriction anymore, as we must prevent
	 * possible loops created by swapping in setlist type of sets.
	 */
	if (!(from->type->features == to->type->features &&
	      from->family == to->family))
		return -IPSET_ERR_TYPE_MISMATCH;

	write_lock_bh(&ip_set_ref_lock);

	if (from->ref_netlink || to->ref_netlink) {
		write_unlock_bh(&ip_set_ref_lock);
		return -EBUSY;
	}

	strscpy_pad(from_name, from->name, IPSET_MAXNAMELEN);
	strscpy_pad(from->name, to->name, IPSET_MAXNAMELEN);
	strscpy_pad(to->name, from_name, IPSET_MAXNAMELEN);

	swap(from->ref, to->ref);
	ip_set(inst, from_id) = to;
	ip_set(inst, to_id) = from;
	write_unlock_bh(&ip_set_ref_lock);

	return 0;
}

/* List/save set data */

#define DUMP_INIT	0
#define DUMP_ALL	1
#define DUMP_ONE	2
#define DUMP_LAST	3

#define DUMP_TYPE(arg)		(((u32)(arg)) & 0x0000FFFF)
#define DUMP_FLAGS(arg)		(((u32)(arg)) >> 16)

int
ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
{
	u32 cadt_flags = 0;

	if (SET_WITH_TIMEOUT(set))
		if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
					   htonl(set->timeout))))
			return -EMSGSIZE;
	if (SET_WITH_COUNTER(set))
		cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
	if (SET_WITH_COMMENT(set))
		cadt_flags |= IPSET_FLAG_WITH_COMMENT;
	if (SET_WITH_SKBINFO(set))
		cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
	if (SET_WITH_FORCEADD(set))
		cadt_flags |= IPSET_FLAG_WITH_FORCEADD;

	if (!cadt_flags)
		return 0;
	return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
}
EXPORT_SYMBOL_GPL(ip_set_put_flags);

static int
ip_set_dump_done(struct netlink_callback *cb)
{
	if (cb->args[IPSET_CB_ARG0]) {
		struct ip_set_net *inst =
			(struct ip_set_net *)cb->args[IPSET_CB_NET];
		ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
		struct ip_set *set = ip_set_ref_netlink(inst, index);

		if (set->variant->uref)
			set->variant->uref(set, cb, false);
		pr_debug("release set %s\n", set->name);
		__ip_set_put_netlink(set);
	}
	return 0;
}

static inline void
dump_attrs(struct nlmsghdr *nlh)
{
	const struct nlattr *attr;
	int rem;

	pr_debug("dump nlmsg\n");
	nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
		pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
	}
}

static const struct nla_policy
ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
	[IPSET_ATTR_SETNAME]	= { .type = NLA_NUL_STRING,
				    .len = IPSET_MAXNAMELEN - 1 },
	[IPSET_ATTR_FLAGS]	= { .type = NLA_U32 },
};

static int
ip_set_dump_start(struct netlink_callback *cb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
	struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
	struct nlattr *attr = (void *)nlh + min_len;
	struct sk_buff *skb = cb->skb;
	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
	u32 dump_type;
	int ret;

	ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
			nlh->nlmsg_len - min_len,
			ip_set_dump_policy, NULL);
	if (ret)
		goto error;

	cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
	if
(cda[IPSET_ATTR_SETNAME]) { ip_set_id_t index; struct ip_set *set; set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]), &index); if (!set) { ret = -ENOENT; goto error; } dump_type = DUMP_ONE; cb->args[IPSET_CB_INDEX] = index; } else { dump_type = DUMP_ALL; } if (cda[IPSET_ATTR_FLAGS]) { u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]); dump_type |= (f << 16); } cb->args[IPSET_CB_NET] = (unsigned long)inst; cb->args[IPSET_CB_DUMP] = dump_type; return 0; error: /* We have to create and send the error message manually :-( */ if (nlh->nlmsg_flags & NLM_F_ACK) { netlink_ack(cb->skb, nlh, ret, NULL); } return ret; } static int ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb) { ip_set_id_t index = IPSET_INVALID_ID, max; struct ip_set *set = NULL; struct nlmsghdr *nlh = NULL; unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0; struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk)); u32 dump_type, dump_flags; bool is_destroyed; int ret = 0; if (!cb->args[IPSET_CB_DUMP]) return -EINVAL; if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max) goto out; dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]); dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]); max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1 : inst->ip_set_max; dump_last: pr_debug("dump type, flag: %u %u index: %ld\n", dump_type, dump_flags, cb->args[IPSET_CB_INDEX]); for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) { index = (ip_set_id_t)cb->args[IPSET_CB_INDEX]; write_lock_bh(&ip_set_ref_lock); set = ip_set(inst, index); is_destroyed = inst->is_destroyed; if (!set || is_destroyed) { write_unlock_bh(&ip_set_ref_lock); if (dump_type == DUMP_ONE) { ret = -ENOENT; goto out; } if (is_destroyed) { /* All sets are just being destroyed */ ret = 0; goto out; } continue; } /* When dumping all sets, we must dump "sorted" * so that lists (unions of sets) are dumped last. 
*/ if (dump_type != DUMP_ONE && ((dump_type == DUMP_ALL) == !!(set->type->features & IPSET_DUMP_LAST))) { write_unlock_bh(&ip_set_ref_lock); continue; } pr_debug("List set: %s\n", set->name); if (!cb->args[IPSET_CB_ARG0]) { /* Start listing: make sure set won't be destroyed */ pr_debug("reference set\n"); set->ref_netlink++; } write_unlock_bh(&ip_set_ref_lock); nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags, IPSET_CMD_LIST); if (!nlh) { ret = -EMSGSIZE; goto release_refcount; } if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, cb->args[IPSET_CB_PROTO]) || nla_put_string(skb, IPSET_ATTR_SETNAME, set->name)) goto nla_put_failure; if (dump_flags & IPSET_FLAG_LIST_SETNAME) goto next_set; switch (cb->args[IPSET_CB_ARG0]) { case 0: /* Core header data */ if (nla_put_string(skb, IPSET_ATTR_TYPENAME, set->type->name) || nla_put_u8(skb, IPSET_ATTR_FAMILY, set->family) || nla_put_u8(skb, IPSET_ATTR_REVISION, set->revision)) goto nla_put_failure; if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN && nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index))) goto nla_put_failure; ret = set->variant->head(set, skb); if (ret < 0) goto release_refcount; if (dump_flags & IPSET_FLAG_LIST_HEADER) goto next_set; if (set->variant->uref) set->variant->uref(set, cb, true); fallthrough; default: ret = set->variant->list(set, skb, cb); if (!cb->args[IPSET_CB_ARG0]) /* Set is done, proceed with next one */ goto next_set; goto release_refcount; } } /* If we dump all sets, continue with dumping last ones */ if (dump_type == DUMP_ALL) { dump_type = DUMP_LAST; cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16); cb->args[IPSET_CB_INDEX] = 0; if (set && set->variant->uref) set->variant->uref(set, cb, false); goto dump_last; } goto out; nla_put_failure: ret = -EFAULT; next_set: if (dump_type == DUMP_ONE) cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID; else cb->args[IPSET_CB_INDEX]++; release_refcount: /* If there was an error or set is done, release set */ if (ret || !cb->args[IPSET_CB_ARG0]) { set = ip_set_ref_netlink(inst, index); if (set->variant->uref) set->variant->uref(set, cb, false); pr_debug("release set %s\n", set->name); __ip_set_put_netlink(set); cb->args[IPSET_CB_ARG0] = 0; } out: if (nlh) { nlmsg_end(skb, nlh); pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len); dump_attrs(nlh); } return ret < 0 ? 
ret : skb->len; } static int ip_set_dump(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; { struct netlink_dump_control c = { .start = ip_set_dump_start, .dump = ip_set_dump_do, .done = ip_set_dump_done, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } } /* Add, del and test */ static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_DATA] = { .type = NLA_NESTED }, [IPSET_ATTR_ADT] = { .type = NLA_NESTED }, }; static int call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 flags, bool use_lineno) { int ret; u32 lineno = 0; bool eexist = flags & IPSET_FLAG_EXIST, retried = false; do { if (retried) { __ip_set_get_netlink(set); nfnl_unlock(NFNL_SUBSYS_IPSET); cond_resched(); nfnl_lock(NFNL_SUBSYS_IPSET); __ip_set_put_netlink(set); } ip_set_lock(set); ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); ip_set_unlock(set); retried = true; } while (ret == -ERANGE || (ret == -EAGAIN && set->variant->resize && (ret = set->variant->resize(set, retried)) == 0)); if (!ret || (ret == -IPSET_ERR_EXIST && eexist)) return 0; if (lineno && use_lineno) { /* Error in restore/batch mode: send back lineno */ struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb); struct sk_buff *skb2; struct nlmsgerr *errmsg; size_t payload = min(SIZE_MAX, sizeof(*errmsg) + nlmsg_len(nlh)); int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1]; struct nlattr *cmdattr; u32 *errline; skb2 = nlmsg_new(payload, GFP_KERNEL); if (!skb2) return -ENOMEM; rep = nlmsg_put(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NLMSG_ERROR, payload, 0); errmsg = nlmsg_data(rep); errmsg->error = ret; unsafe_memcpy(&errmsg->msg, nlh, nlh->nlmsg_len, /* Bounds checked by the skb layer. */); cmdattr = (void *)&errmsg->msg + min_len; ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr, nlh->nlmsg_len - min_len, ip_set_adt_policy, NULL); if (ret) { nlmsg_free(skb2); return ret; } errline = nla_data(cda[IPSET_ATTR_LINENO]); *errline = lineno; nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); /* Signal netlink not to send its ACK/errmsg. 
*/ return -EINTR; } return ret; } static int ip_set_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, enum ipset_adt adt, const struct nlmsghdr *nlh, const struct nlattr * const attr[], struct netlink_ext_ack *extack) { struct ip_set_net *inst = ip_set_pernet(net); struct ip_set *set; struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; const struct nlattr *nla; u32 flags = flag_exist(nlh); bool use_lineno; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !((attr[IPSET_ATTR_DATA] != NULL) ^ (attr[IPSET_ATTR_ADT] != NULL)) || (attr[IPSET_ATTR_DATA] && !flag_nested(attr[IPSET_ATTR_DATA])) || (attr[IPSET_ATTR_ADT] && (!flag_nested(attr[IPSET_ATTR_ADT]) || !attr[IPSET_ATTR_LINENO])))) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; use_lineno = !!attr[IPSET_ATTR_LINENO]; if (attr[IPSET_ATTR_DATA]) { if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(net, ctnl, skb, set, tb, adt, flags, use_lineno); } else { int nla_rem; nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) { if (nla_type(nla) != IPSET_ATTR_DATA || !flag_nested(nla) || nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(net, ctnl, skb, set, tb, adt, flags, use_lineno); if (ret < 0) return ret; } } return ret; } static int ip_set_uadd(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return ip_set_ad(info->net, info->sk, skb, IPSET_ADD, info->nlh, attr, info->extack); } static int ip_set_udel(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { return ip_set_ad(info->net, info->sk, skb, IPSET_DEL, info->nlh, attr, info->extack); } static int ip_set_utest(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct ip_set *set; struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; int ret = 0; u32 lineno; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME] || !attr[IPSET_ATTR_DATA] || !flag_nested(attr[IPSET_ATTR_DATA]))) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; rcu_read_lock_bh(); ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0); rcu_read_unlock_bh(); /* Userspace can't trigger element to be re-added */ if (ret == -EAGAIN) ret = 1; return ret > 0 ? 
0 : -IPSET_ERR_EXIST; } /* Get headed data of a set */ static int ip_set_header(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); const struct ip_set *set; struct sk_buff *skb2; struct nlmsghdr *nlh2; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_SETNAME])) return -IPSET_ERR_PROTOCOL; set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); if (!set) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_HEADER); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) || nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get type data */ static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING, .len = IPSET_MAXNAMELEN - 1 }, [IPSET_ATTR_FAMILY] = { .type = NLA_U8 }, }; static int ip_set_type(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct sk_buff *skb2; struct nlmsghdr *nlh2; u8 family, min, max; const char *typename; int ret = 0; if (unlikely(protocol_min_failed(attr) || !attr[IPSET_ATTR_TYPENAME] || !attr[IPSET_ATTR_FAMILY])) return -IPSET_ERR_PROTOCOL; family = nla_get_u8(attr[IPSET_ATTR_FAMILY]); typename = nla_data(attr[IPSET_ATTR_TYPENAME]); ret = find_set_type_minmax(typename, family, &min, &max); if (ret) return ret; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_TYPE); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) || nla_put_u8(skb2, IPSET_ATTR_REVISION, max) || nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min)) goto nla_put_failure; nlmsg_end(skb2, nlh2); pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get protocol version */ static const struct nla_policy ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, }; static int ip_set_protocol(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct sk_buff *skb2; struct nlmsghdr *nlh2; if (unlikely(!attr[IPSET_ATTR_PROTOCOL])) return -IPSET_ERR_PROTOCOL; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_PROTOCOL); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL)) goto nla_put_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL_MIN, IPSET_PROTOCOL_MIN)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); 
nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } /* Get set by name or index, from userspace */ static int ip_set_byname(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct sk_buff *skb2; struct nlmsghdr *nlh2; ip_set_id_t id = IPSET_INVALID_ID; const struct ip_set *set; if (unlikely(protocol_failed(attr) || !attr[IPSET_ATTR_SETNAME])) return -IPSET_ERR_PROTOCOL; set = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &id); if (id == IPSET_INVALID_ID) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_GET_BYNAME); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || nla_put_net16(skb2, IPSET_ATTR_INDEX, htons(id))) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } static const struct nla_policy ip_set_index_policy[IPSET_ATTR_CMD_MAX + 1] = { [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 }, [IPSET_ATTR_INDEX] = { .type = NLA_U16 }, }; static int ip_set_byindex(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const attr[]) { struct ip_set_net *inst = ip_set_pernet(info->net); struct sk_buff *skb2; struct nlmsghdr *nlh2; ip_set_id_t id = IPSET_INVALID_ID; const struct ip_set *set; if (unlikely(protocol_failed(attr) || !attr[IPSET_ATTR_INDEX])) return -IPSET_ERR_PROTOCOL; id = ip_set_get_h16(attr[IPSET_ATTR_INDEX]); if (id >= inst->ip_set_max) return -ENOENT; set = ip_set(inst, id); if (set == NULL) return -ENOENT; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, 0, IPSET_CMD_GET_BYINDEX); if (!nlh2) goto nlmsg_failure; if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) || nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name)) goto nla_put_failure; nlmsg_end(skb2, nlh2); return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); nla_put_failure: nlmsg_cancel(skb2, nlh2); nlmsg_failure: kfree_skb(skb2); return -EMSGSIZE; } static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = { [IPSET_CMD_NONE] = { .call = ip_set_none, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, }, [IPSET_CMD_CREATE] = { .call = ip_set_create, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_create_policy, }, [IPSET_CMD_DESTROY] = { .call = ip_set_destroy, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_FLUSH] = { .call = ip_set_flush, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_RENAME] = { .call = ip_set_rename, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname2_policy, }, [IPSET_CMD_SWAP] = { .call = ip_set_swap, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname2_policy, }, [IPSET_CMD_LIST] = { .call = ip_set_dump, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_dump_policy, }, [IPSET_CMD_SAVE] = { .call = ip_set_dump, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_ADD] = { .call = ip_set_uadd, .type = 
NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_DEL] = { .call = ip_set_udel, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_TEST] = { .call = ip_set_utest, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_adt_policy, }, [IPSET_CMD_HEADER] = { .call = ip_set_header, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_TYPE] = { .call = ip_set_type, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_type_policy, }, [IPSET_CMD_PROTOCOL] = { .call = ip_set_protocol, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_protocol_policy, }, [IPSET_CMD_GET_BYNAME] = { .call = ip_set_byname, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_setname_policy, }, [IPSET_CMD_GET_BYINDEX] = { .call = ip_set_byindex, .type = NFNL_CB_MUTEX, .attr_count = IPSET_ATTR_CMD_MAX, .policy = ip_set_index_policy, }, }; static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = { .name = "ip_set", .subsys_id = NFNL_SUBSYS_IPSET, .cb_count = IPSET_MSG_MAX, .cb = ip_set_netlink_subsys_cb, }; /* Interface to iptables/ip6tables */ static int ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) { unsigned int *op; void *data; int copylen = *len, ret = 0; struct net *net = sock_net(sk); struct ip_set_net *inst = ip_set_pernet(net); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; if (optval != SO_IP_SET) return -EBADF; if (*len < sizeof(unsigned int)) return -EINVAL; data = vmalloc(*len); if (!data) return -ENOMEM; if (copy_from_user(data, user, *len) != 0) { ret = -EFAULT; goto done; } op = data; if (*op < IP_SET_OP_VERSION) { /* Check the version at the beginning of operations */ struct ip_set_req_version *req_version = data; if (*len < sizeof(struct ip_set_req_version)) { ret = -EINVAL; goto done; } if (req_version->version < IPSET_PROTOCOL_MIN) { ret = -EPROTO; goto done; } } switch (*op) { case IP_SET_OP_VERSION: { struct ip_set_req_version *req_version = data; if (*len != sizeof(struct ip_set_req_version)) { ret = -EINVAL; goto done; } req_version->version = IPSET_PROTOCOL; if (copy_to_user(user, req_version, sizeof(struct ip_set_req_version))) ret = -EFAULT; goto done; } case IP_SET_OP_GET_BYNAME: { struct ip_set_req_get_set *req_get = data; ip_set_id_t id; if (*len != sizeof(struct ip_set_req_get_set)) { ret = -EINVAL; goto done; } req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; nfnl_lock(NFNL_SUBSYS_IPSET); find_set_and_id(inst, req_get->set.name, &id); req_get->set.index = id; nfnl_unlock(NFNL_SUBSYS_IPSET); goto copy; } case IP_SET_OP_GET_FNAME: { struct ip_set_req_get_set_family *req_get = data; ip_set_id_t id; if (*len != sizeof(struct ip_set_req_get_set_family)) { ret = -EINVAL; goto done; } req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; nfnl_lock(NFNL_SUBSYS_IPSET); find_set_and_id(inst, req_get->set.name, &id); req_get->set.index = id; if (id != IPSET_INVALID_ID) req_get->family = ip_set(inst, id)->family; nfnl_unlock(NFNL_SUBSYS_IPSET); goto copy; } case IP_SET_OP_GET_BYINDEX: { struct ip_set_req_get_set *req_get = data; struct ip_set *set; if (*len != sizeof(struct ip_set_req_get_set) || req_get->set.index >= inst->ip_set_max) { ret = -EINVAL; goto done; } nfnl_lock(NFNL_SUBSYS_IPSET); set = ip_set(inst, req_get->set.index); ret = strscpy(req_get->set.name, set ? 
set->name : "", IPSET_MAXNAMELEN); nfnl_unlock(NFNL_SUBSYS_IPSET); if (ret < 0) goto done; goto copy; } default: ret = -EBADMSG; goto done; } /* end of switch(op) */ copy: if (copy_to_user(user, data, copylen)) ret = -EFAULT; done: vfree(data); if (ret > 0) ret = 0; return ret; } static struct nf_sockopt_ops so_set __read_mostly = { .pf = PF_INET, .get_optmin = SO_IP_SET, .get_optmax = SO_IP_SET + 1, .get = ip_set_sockfn_get, .owner = THIS_MODULE, }; static int __net_init ip_set_net_init(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); struct ip_set **list; inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX; if (inst->ip_set_max >= IPSET_INVALID_ID) inst->ip_set_max = IPSET_INVALID_ID - 1; list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL); if (!list) return -ENOMEM; inst->is_deleted = false; inst->is_destroyed = false; rcu_assign_pointer(inst->ip_set_list, list); return 0; } static void __net_exit ip_set_net_pre_exit(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); inst->is_deleted = true; /* flag for ip_set_nfnl_put */ } static void __net_exit ip_set_net_exit(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); _destroy_all_sets(inst); kvfree(rcu_dereference_protected(inst->ip_set_list, 1)); } static struct pernet_operations ip_set_net_ops = { .init = ip_set_net_init, .pre_exit = ip_set_net_pre_exit, .exit = ip_set_net_exit, .id = &ip_set_net_id, .size = sizeof(struct ip_set_net), }; static int __init ip_set_init(void) { int ret = register_pernet_subsys(&ip_set_net_ops); if (ret) { pr_err("ip_set: cannot register pernet_subsys.\n"); return ret; } ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); if (ret != 0) { pr_err("ip_set: cannot register with nfnetlink.\n"); unregister_pernet_subsys(&ip_set_net_ops); return ret; } ret = nf_register_sockopt(&so_set); if (ret != 0) { pr_err("SO_SET registry failed: %d\n", ret); nfnetlink_subsys_unregister(&ip_set_netlink_subsys); unregister_pernet_subsys(&ip_set_net_ops); return ret; } return 0; } static void __exit ip_set_fini(void) { nf_unregister_sockopt(&so_set); nfnetlink_subsys_unregister(&ip_set_netlink_subsys); unregister_pernet_subsys(&ip_set_net_ops); /* Wait for call_rcu() in destroy */ rcu_barrier(); pr_debug("these are the famous last words\n"); } module_init(ip_set_init); module_exit(ip_set_fini); MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL));
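The legacy sockopt interface implemented by ip_set_sockfn_get() is easiest to see from the userspace end. The sketch below (not part of this file) exercises the IP_SET_OP_VERSION branch; it assumes the mainline uapi header <linux/netfilter/ipset/ip_set.h> for SO_IP_SET, IP_SET_OP_VERSION and struct ip_set_req_version, and it needs CAP_NET_ADMIN (plus privileges for the raw socket) to get past the permission checks.

/* Hypothetical userspace probe of the SO_IP_SET version handshake. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter/ipset/ip_set.h>

int main(void)
{
	struct ip_set_req_version req;
	socklen_t len = sizeof(req);
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.op = IP_SET_OP_VERSION;
	/* ip_set_sockfn_get() requires *len == sizeof(struct ip_set_req_version) here. */
	if (getsockopt(fd, SOL_IP, SO_IP_SET, &req, &len) == 0)
		printf("kernel ipset protocol: %u\n", req.version);
	return 0;
}

On success the kernel fills in req.version with IPSET_PROTOCOL, exactly as the IP_SET_OP_VERSION case above does.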
[per-line coverage counts and line-number index omitted]
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/sch_sfq.c Stochastic Fairness Queueing discipline. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/siphash.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/red.h> /* Stochastic Fairness Queuing algorithm. ======================================= Source: Paul E. McKenney "Stochastic Fairness Queuing", IEEE INFOCOMM'90 Proceedings, San Francisco, 1990. Paul E. McKenney "Stochastic Fairness Queuing", "Interworking: Research and Experience", v.2, 1991, p.113-131. See also: M. Shreedhar and George Varghese "Efficient Fair Queuing using Deficit Round Robin", Proc. SIGCOMM 95. This is not the thing that is usually called (W)FQ nowadays. It does not use any timestamp mechanism, but instead processes queues in round-robin order. ADVANTAGE: - It is very cheap. Both CPU and memory requirements are minimal. DRAWBACKS: - "Stochastic" -> It is not 100% fair. When hash collisions occur, several flows are considered as one. - "Round-robin" -> It introduces larger delays than virtual clock based schemes, and should not be used for isolating interactive traffic from non-interactive. It means that this scheduler should be used as a leaf of CBQ or P3, which puts interactive traffic into a higher priority band. We still need true WFQ for the top-level CSZ, but using WFQ for the best-effort traffic is absolutely pointless: SFQ is superior for this purpose. IMPLEMENTATION: This implementation limits: - maximal queue length per flow to 127 packets. - max mtu to 2^18-1; - max 65408 flows, - number of hash buckets to 65536. It is easy to increase these values, but not in flight. */ #define SFQ_MAX_DEPTH 127 /* max number of packets per flow */ #define SFQ_DEFAULT_FLOWS 128 #define SFQ_MAX_FLOWS (0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */ #define SFQ_EMPTY_SLOT 0xffff #define SFQ_DEFAULT_HASH_DIVISOR 1024 /* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */ typedef u16 sfq_index; /* * We don't use pointers to save space. * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array * while following values [SFQ_MAX_FLOWS ...
SFQ_MAX_FLOWS + SFQ_MAX_DEPTH] * are 'pointers' to dep[] array */ struct sfq_head { sfq_index next; sfq_index prev; }; struct sfq_slot { struct sk_buff *skblist_next; struct sk_buff *skblist_prev; sfq_index qlen; /* number of skbs in skblist */ sfq_index next; /* next slot in sfq RR chain */ struct sfq_head dep; /* anchor in dep[] chains */ unsigned short hash; /* hash value (index in ht[]) */ int allot; /* credit for this slot */ unsigned int backlog; struct red_vars vars; }; struct sfq_sched_data { /* frequently used fields */ int limit; /* limit of total number of packets in this qdisc */ unsigned int divisor; /* number of slots in hash table */ u8 headdrop; u8 maxdepth; /* limit of packets per flow */ siphash_key_t perturbation; u8 cur_depth; /* depth of longest slot */ u8 flags; struct tcf_proto __rcu *filter_list; struct tcf_block *block; sfq_index *ht; /* Hash table ('divisor' slots) */ struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ struct red_parms *red_parms; struct tc_sfqred_stats stats; struct sfq_slot *tail; /* current slot in round */ struct sfq_head dep[SFQ_MAX_DEPTH + 1]; /* Linked lists of slots, indexed by depth * dep[0] : list of unused flows * dep[1] : list of flows with 1 packet * dep[X] : list of flows with X packets */ unsigned int maxflows; /* number of flows in flows array */ int perturb_period; unsigned int quantum; /* Allotment per round: MUST BE >= MTU */ struct timer_list perturb_timer; struct Qdisc *sch; }; /* * sfq_head are either in a sfq_slot or in dep[] array */ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) { if (val < SFQ_MAX_FLOWS) return &q->slots[val].dep; return &q->dep[val - SFQ_MAX_FLOWS]; } static unsigned int sfq_hash(const struct sfq_sched_data *q, const struct sk_buff *skb) { return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1); } static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) { struct sfq_sched_data *q = qdisc_priv(sch); struct tcf_result res; struct tcf_proto *fl; int result; if (TC_H_MAJ(skb->priority) == sch->handle && TC_H_MIN(skb->priority) > 0 && TC_H_MIN(skb->priority) <= q->divisor) return TC_H_MIN(skb->priority); fl = rcu_dereference_bh(q->filter_list); if (!fl) return sfq_hash(q, skb) + 1; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; result = tcf_classify(skb, NULL, fl, &res, false); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; fallthrough; case TC_ACT_SHOT: return 0; } #endif if (TC_H_MIN(res.classid) <= q->divisor) return TC_H_MIN(res.classid); } return 0; } /* * x : slot number [0 .. 
SFQ_MAX_FLOWS - 1] */ static inline void sfq_link(struct sfq_sched_data *q, sfq_index x) { sfq_index p, n; struct sfq_slot *slot = &q->slots[x]; int qlen = slot->qlen; p = qlen + SFQ_MAX_FLOWS; n = q->dep[qlen].next; slot->dep.next = n; slot->dep.prev = p; q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */ sfq_dep_head(q, n)->prev = x; } #define sfq_unlink(q, x, n, p) \ do { \ n = q->slots[x].dep.next; \ p = q->slots[x].dep.prev; \ sfq_dep_head(q, p)->next = n; \ sfq_dep_head(q, n)->prev = p; \ } while (0) static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x) { sfq_index p, n; int d; sfq_unlink(q, x, n, p); d = q->slots[x].qlen--; if (n == p && q->cur_depth == d) q->cur_depth--; sfq_link(q, x); } static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x) { sfq_index p, n; int d; sfq_unlink(q, x, n, p); d = ++q->slots[x].qlen; if (q->cur_depth < d) q->cur_depth = d; sfq_link(q, x); } /* helper functions : might be changed when/if skb use a standard list_head */ /* remove one skb from tail of slot queue */ static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot) { struct sk_buff *skb = slot->skblist_prev; slot->skblist_prev = skb->prev; skb->prev->next = (struct sk_buff *)slot; skb->next = skb->prev = NULL; return skb; } /* remove one skb from head of slot queue */ static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot) { struct sk_buff *skb = slot->skblist_next; slot->skblist_next = skb->next; skb->next->prev = (struct sk_buff *)slot; skb->next = skb->prev = NULL; return skb; } static inline void slot_queue_init(struct sfq_slot *slot) { memset(slot, 0, sizeof(*slot)); slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot; } /* add skb to slot queue (tail add) */ static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb) { skb->prev = slot->skblist_prev; skb->next = (struct sk_buff *)slot; slot->skblist_prev->next = skb; slot->skblist_prev = skb; } static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free) { struct sfq_sched_data *q = qdisc_priv(sch); sfq_index x, d = q->cur_depth; struct sk_buff *skb; unsigned int len; struct sfq_slot *slot; /* Queue is full! Find the longest slot and drop tail packet from it */ if (d > 1) { x = q->dep[d].next; slot = &q->slots[x]; drop: skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot); len = qdisc_pkt_len(skb); slot->backlog -= len; sfq_dec(q, x); sch->q.qlen--; qdisc_qstats_backlog_dec(sch, skb); qdisc_drop(skb, sch, to_free); return len; } if (d == 1) { /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. 
*/ x = q->tail->next; slot = &q->slots[x]; if (slot->next == x) q->tail = NULL; /* no more active slots */ else q->tail->next = slot->next; q->ht[slot->hash] = SFQ_EMPTY_SLOT; goto drop; } return 0; } /* Is ECN parameter configured */ static int sfq_prob_mark(const struct sfq_sched_data *q) { return q->flags & TC_RED_ECN; } /* Should packets over max threshold just be marked */ static int sfq_hard_mark(const struct sfq_sched_data *q) { return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN; } static int sfq_headdrop(const struct sfq_sched_data *q) { return q->headdrop; } static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash, dropped; sfq_index x, qlen; struct sfq_slot *slot; int ret; struct sk_buff *head; int delta; hash = sfq_classify(skb, sch, &ret); if (hash == 0) { if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); __qdisc_drop(skb, to_free); return ret; } hash--; x = q->ht[hash]; slot = &q->slots[x]; if (x == SFQ_EMPTY_SLOT) { x = q->dep[0].next; /* get a free slot */ if (x >= SFQ_MAX_FLOWS) return qdisc_drop(skb, sch, to_free); q->ht[hash] = x; slot = &q->slots[x]; slot->hash = hash; slot->backlog = 0; /* should already be 0 anyway... */ red_set_vars(&slot->vars); goto enqueue; } if (q->red_parms) { slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms, &slot->vars, slot->backlog); switch (red_action(q->red_parms, &slot->vars, slot->vars.qavg)) { case RED_DONT_MARK: break; case RED_PROB_MARK: qdisc_qstats_overlimit(sch); if (sfq_prob_mark(q)) { /* We know we have at least one packet in queue */ if (sfq_headdrop(q) && INET_ECN_set_ce(slot->skblist_next)) { q->stats.prob_mark_head++; break; } if (INET_ECN_set_ce(skb)) { q->stats.prob_mark++; break; } } q->stats.prob_drop++; goto congestion_drop; case RED_HARD_MARK: qdisc_qstats_overlimit(sch); if (sfq_hard_mark(q)) { /* We know we have at least one packet in queue */ if (sfq_headdrop(q) && INET_ECN_set_ce(slot->skblist_next)) { q->stats.forced_mark_head++; break; } if (INET_ECN_set_ce(skb)) { q->stats.forced_mark++; break; } } q->stats.forced_drop++; goto congestion_drop; } } if (slot->qlen >= q->maxdepth) { congestion_drop: if (!sfq_headdrop(q)) return qdisc_drop(skb, sch, to_free); /* We know we have at least one packet in queue */ head = slot_dequeue_head(slot); delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb); sch->qstats.backlog -= delta; slot->backlog -= delta; qdisc_drop(head, sch, to_free); slot_queue_add(slot, skb); qdisc_tree_reduce_backlog(sch, 0, delta); return NET_XMIT_CN; } enqueue: qdisc_qstats_backlog_inc(sch, skb); slot->backlog += qdisc_pkt_len(skb); slot_queue_add(slot, skb); sfq_inc(q, x); if (slot->qlen == 1) { /* The flow is new */ if (q->tail == NULL) { /* It is the first flow */ slot->next = x; } else { slot->next = q->tail->next; q->tail->next = x; } /* We put this flow at the end of our flow list. * This might sound unfair for a new flow to wait after old ones, * but we could endup servicing new flows only, and freeze old ones. */ q->tail = slot; /* We could use a bigger initial quantum for new flows */ slot->allot = q->quantum; } if (++sch->q.qlen <= q->limit) return NET_XMIT_SUCCESS; qlen = slot->qlen; dropped = sfq_drop(sch, to_free); /* Return Congestion Notification only if we dropped a packet * from this flow. 
*/ if (qlen != slot->qlen) { qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb)); return NET_XMIT_CN; } /* As we dropped a packet, better let upper stack know this */ qdisc_tree_reduce_backlog(sch, 1, dropped); return NET_XMIT_SUCCESS; } static struct sk_buff * sfq_dequeue(struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; sfq_index a, next_a; struct sfq_slot *slot; /* No active slots */ if (q->tail == NULL) return NULL; next_slot: a = q->tail->next; slot = &q->slots[a]; if (slot->allot <= 0) { q->tail = slot; slot->allot += q->quantum; goto next_slot; } skb = slot_dequeue_head(slot); sfq_dec(q, a); qdisc_bstats_update(sch, skb); sch->q.qlen--; qdisc_qstats_backlog_dec(sch, skb); slot->backlog -= qdisc_pkt_len(skb); /* Is the slot empty? */ if (slot->qlen == 0) { q->ht[slot->hash] = SFQ_EMPTY_SLOT; next_a = slot->next; if (a == next_a) { q->tail = NULL; /* no more active slots */ return skb; } q->tail->next = next_a; } else { slot->allot -= qdisc_pkt_len(skb); } return skb; } static void sfq_reset(struct Qdisc *sch) { struct sk_buff *skb; while ((skb = sfq_dequeue(sch)) != NULL) rtnl_kfree_skbs(skb, skb); } /* * When q->perturbation is changed, we rehash all queued skbs * to avoid OOO (Out Of Order) effects. * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change * counters. */ static void sfq_rehash(struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; int i; struct sfq_slot *slot; struct sk_buff_head list; int dropped = 0; unsigned int drop_len = 0; __skb_queue_head_init(&list); for (i = 0; i < q->maxflows; i++) { slot = &q->slots[i]; if (!slot->qlen) continue; while (slot->qlen) { skb = slot_dequeue_head(slot); sfq_dec(q, i); __skb_queue_tail(&list, skb); } slot->backlog = 0; red_set_vars(&slot->vars); q->ht[slot->hash] = SFQ_EMPTY_SLOT; } q->tail = NULL; while ((skb = __skb_dequeue(&list)) != NULL) { unsigned int hash = sfq_hash(q, skb); sfq_index x = q->ht[hash]; slot = &q->slots[x]; if (x == SFQ_EMPTY_SLOT) { x = q->dep[0].next; /* get a free slot */ if (x >= SFQ_MAX_FLOWS) { drop: qdisc_qstats_backlog_dec(sch, skb); drop_len += qdisc_pkt_len(skb); kfree_skb(skb); dropped++; continue; } q->ht[hash] = x; slot = &q->slots[x]; slot->hash = hash; } if (slot->qlen >= q->maxdepth) goto drop; slot_queue_add(slot, skb); if (q->red_parms) slot->vars.qavg = red_calc_qavg(q->red_parms, &slot->vars, slot->backlog); slot->backlog += qdisc_pkt_len(skb); sfq_inc(q, x); if (slot->qlen == 1) { /* The flow is new */ if (q->tail == NULL) { /* It is the first flow */ slot->next = x; } else { slot->next = q->tail->next; q->tail->next = x; } q->tail = slot; slot->allot = q->quantum; } } sch->q.qlen -= dropped; qdisc_tree_reduce_backlog(sch, dropped, drop_len); } static void sfq_perturbation(struct timer_list *t) { struct sfq_sched_data *q = timer_container_of(q, t, perturb_timer); struct Qdisc *sch = q->sch; spinlock_t *root_lock; siphash_key_t nkey; int period; get_random_bytes(&nkey, sizeof(nkey)); rcu_read_lock(); root_lock = qdisc_lock(qdisc_root_sleeping(sch)); spin_lock(root_lock); q->perturbation = nkey; if (!q->filter_list && q->tail) sfq_rehash(sch); spin_unlock(root_lock); /* q->perturb_period can change under us from * sfq_change() and sfq_destroy(). 
*/ period = READ_ONCE(q->perturb_period); if (period) mod_timer(&q->perturb_timer, jiffies + period); rcu_read_unlock(); } static int sfq_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct sfq_sched_data *q = qdisc_priv(sch); struct tc_sfq_qopt *ctl = nla_data(opt); struct tc_sfq_qopt_v1 *ctl_v1 = NULL; unsigned int qlen, dropped = 0; struct red_parms *p = NULL; struct sk_buff *to_free = NULL; struct sk_buff *tail = NULL; unsigned int maxflows; unsigned int quantum; unsigned int divisor; int perturb_period; u8 headdrop; u8 maxdepth; int limit; u8 flags; if (opt->nla_len < nla_attr_size(sizeof(*ctl))) return -EINVAL; if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1))) ctl_v1 = nla_data(opt); if (ctl->divisor && (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) return -EINVAL; if ((int)ctl->quantum < 0) { NL_SET_ERR_MSG_MOD(extack, "invalid quantum"); return -EINVAL; } if (ctl->perturb_period < 0 || ctl->perturb_period > INT_MAX / HZ) { NL_SET_ERR_MSG_MOD(extack, "invalid perturb period"); return -EINVAL; } perturb_period = ctl->perturb_period * HZ; if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) return -EINVAL; if (ctl_v1 && ctl_v1->qth_min) { p = kmalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; } sch_tree_lock(sch); limit = q->limit; divisor = q->divisor; headdrop = q->headdrop; maxdepth = q->maxdepth; maxflows = q->maxflows; quantum = q->quantum; flags = q->flags; /* update and validate configuration */ if (ctl->quantum) quantum = ctl->quantum; if (ctl->flows) maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); if (ctl->divisor) { divisor = ctl->divisor; maxflows = min_t(u32, maxflows, divisor); } if (ctl_v1) { if (ctl_v1->depth) maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH); if (p) { red_set_parms(p, ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog, ctl_v1->Plog, ctl_v1->Scell_log, NULL, ctl_v1->max_P); } flags = ctl_v1->flags; headdrop = ctl_v1->headdrop; } if (ctl->limit) { limit = min_t(u32, ctl->limit, maxdepth * maxflows); maxflows = min_t(u32, maxflows, limit); } if (limit == 1) { sch_tree_unlock(sch); kfree(p); NL_SET_ERR_MSG_MOD(extack, "invalid limit"); return -EINVAL; } /* commit configuration */ q->limit = limit; q->divisor = divisor; q->headdrop = headdrop; q->maxdepth = maxdepth; q->maxflows = maxflows; WRITE_ONCE(q->perturb_period, perturb_period); q->quantum = quantum; q->flags = flags; if (p) swap(q->red_parms, p); qlen = sch->q.qlen; while (sch->q.qlen > q->limit) { dropped += sfq_drop(sch, &to_free); if (!tail) tail = to_free; } rtnl_kfree_skbs(to_free, tail); qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); timer_delete(&q->perturb_timer); if (q->perturb_period) { mod_timer(&q->perturb_timer, jiffies + q->perturb_period); get_random_bytes(&q->perturbation, sizeof(q->perturbation)); } sch_tree_unlock(sch); kfree(p); return 0; } static void *sfq_alloc(size_t sz) { return kvmalloc(sz, GFP_KERNEL); } static void sfq_free(void *addr) { kvfree(addr); } static void sfq_destroy(struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); tcf_block_put(q->block); WRITE_ONCE(q->perturb_period, 0); timer_delete_sync(&q->perturb_timer); sfq_free(q->ht); sfq_free(q->slots); kfree(q->red_parms); } static int sfq_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct sfq_sched_data *q = qdisc_priv(sch); int i; int err; q->sch = sch; timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE); err = tcf_block_get(&q->block, 
&q->filter_list, sch, extack); if (err) return err; for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) { q->dep[i].next = i + SFQ_MAX_FLOWS; q->dep[i].prev = i + SFQ_MAX_FLOWS; } q->limit = SFQ_MAX_DEPTH; q->maxdepth = SFQ_MAX_DEPTH; q->cur_depth = 0; q->tail = NULL; q->divisor = SFQ_DEFAULT_HASH_DIVISOR; q->maxflows = SFQ_DEFAULT_FLOWS; q->quantum = psched_mtu(qdisc_dev(sch)); q->perturb_period = 0; get_random_bytes(&q->perturbation, sizeof(q->perturbation)); if (opt) { int err = sfq_change(sch, opt, extack); if (err) return err; } q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); if (!q->ht || !q->slots) { /* Note: sfq_destroy() will be called by our caller */ return -ENOMEM; } for (i = 0; i < q->divisor; i++) q->ht[i] = SFQ_EMPTY_SLOT; for (i = 0; i < q->maxflows; i++) { slot_queue_init(&q->slots[i]); sfq_link(q, i); } if (q->limit >= 1) sch->flags |= TCQ_F_CAN_BYPASS; else sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; } static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned char *b = skb_tail_pointer(skb); struct tc_sfq_qopt_v1 opt; struct red_parms *p = q->red_parms; memset(&opt, 0, sizeof(opt)); opt.v0.quantum = q->quantum; opt.v0.perturb_period = q->perturb_period / HZ; opt.v0.limit = q->limit; opt.v0.divisor = q->divisor; opt.v0.flows = q->maxflows; opt.depth = q->maxdepth; opt.headdrop = q->headdrop; if (p) { opt.qth_min = p->qth_min >> p->Wlog; opt.qth_max = p->qth_max >> p->Wlog; opt.Wlog = p->Wlog; opt.Plog = p->Plog; opt.Scell_log = p->Scell_log; opt.max_P = p->max_P; } memcpy(&opt.stats, &q->stats, sizeof(opt.stats)); opt.flags = q->flags; if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) goto nla_put_failure; return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long sfq_find(struct Qdisc *sch, u32 classid) { return 0; } static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { return 0; } static void sfq_unbind(struct Qdisc *q, unsigned long cl) { } static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { struct sfq_sched_data *q = qdisc_priv(sch); if (cl) return NULL; return q->block; } static int sfq_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { tcm->tcm_handle |= TC_H_MIN(cl); return 0; } static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct gnet_dump *d) { struct sfq_sched_data *q = qdisc_priv(sch); sfq_index idx = q->ht[cl - 1]; struct gnet_stats_queue qs = { 0 }; struct tc_sfq_xstats xstats = { 0 }; if (idx != SFQ_EMPTY_SLOT) { const struct sfq_slot *slot = &q->slots[idx]; xstats.allot = slot->allot; qs.qlen = slot->qlen; qs.backlog = slot->backlog; } if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) return -1; return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); } static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int i; if (arg->stop) return; for (i = 0; i < q->divisor; i++) { if (q->ht[i] == SFQ_EMPTY_SLOT) { arg->count++; continue; } if (!tc_qdisc_stats_dump(sch, i + 1, arg)) break; } } static const struct Qdisc_class_ops sfq_class_ops = { .leaf = sfq_leaf, .find = sfq_find, .tcf_block = sfq_tcf_block, .bind_tcf = sfq_bind, .unbind_tcf = sfq_unbind, .dump = sfq_dump_class, .dump_stats = sfq_dump_class_stats, .walk = sfq_walk, }; 
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = { .cl_ops = &sfq_class_ops, .id = "sfq", .priv_size = sizeof(struct sfq_sched_data), .enqueue = sfq_enqueue, .dequeue = sfq_dequeue, .peek = qdisc_peek_dequeued, .init = sfq_init, .reset = sfq_reset, .destroy = sfq_destroy, .change = NULL, .dump = sfq_dump, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("sfq"); static int __init sfq_module_init(void) { return register_qdisc(&sfq_qdisc_ops); } static void __exit sfq_module_exit(void) { unregister_qdisc(&sfq_qdisc_ops); } module_init(sfq_module_init) module_exit(sfq_module_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Stochastic Fairness qdisc");
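The allot/quantum handling in sfq_enqueue()/sfq_dequeue() above is plain deficit round robin: the slot at the head of the ring is served while it has credit, pays the byte length of every packet it sends, and once its credit is spent it is topped up by one quantum and the ring advances. The user-space toy below (a sketch of just that accounting; the flow names and packet sizes are invented) shows why a small-packet flow gets many packets per round while a full-MTU flow gets one:

/* Toy model of SFQ's allot/quantum accounting (illustrative only). */
#include <stdio.h>

struct flow { const char *name; int backlog; int pkt_len; int allot; };

int main(void)
{
	int quantum = 1500; /* like q->quantum: one MTU of credit per round */
	struct flow flows[2] = {
		{ "bulk",  4, 1500, 1500 }, /* slot->allot starts at one quantum */
		{ "small", 4,  100, 1500 },
	};
	int i = 0, served = 0;

	while (served < 8) {
		struct flow *f = &flows[i];

		if (f->allot <= 0) {       /* credit spent: refill, advance ring */
			f->allot += quantum;
			i = (i + 1) % 2;
		} else if (f->backlog) {   /* "dequeue" one packet, pay its size */
			f->backlog--;
			f->allot -= f->pkt_len;
			served++;
			printf("served %s (allot now %d)\n", f->name, f->allot);
		} else {
			i = (i + 1) % 2;   /* an empty slot leaves the ring */
		}
	}
	return 0;
}

Running it, "small" sends all four 100-byte packets in a single visit while "bulk" pays its whole quantum per 1500-byte packet, which is the byte-fairness the qdisc is after.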
[per-line coverage counts and line-number index omitted]
// SPDX-License-Identifier: GPL-2.0-or-later /* * Glue Code for AVX assembler versions of Serpent Cipher * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ #include <linux/module.h> #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/export.h> #include <crypto/algapi.h> #include <crypto/serpent.h> #include "serpent-avx.h" #include "ecb_cbc_helpers.h" /* 8-way parallel cipher functions */ asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx); asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx); asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, const u8 *src); EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx); static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); } static int ecb_encrypt(struct skcipher_request *req) { ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx); ECB_BLOCK(1, __serpent_encrypt); ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx); ECB_BLOCK(1, __serpent_decrypt); ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1); CBC_ENC_BLOCK(__serpent_encrypt); CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS); CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx); CBC_DEC_BLOCK(1, __serpent_decrypt); CBC_WALK_END(); } static struct skcipher_alg serpent_algs[] = { { .base.cra_name = "ecb(serpent)", .base.cra_driver_name = "ecb-serpent-avx", .base.cra_priority = 500, .base.cra_blocksize = SERPENT_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct serpent_ctx), .base.cra_module = THIS_MODULE, .min_keysize = SERPENT_MIN_KEY_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE, .setkey = serpent_setkey_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "cbc(serpent)", .base.cra_driver_name = "cbc-serpent-avx", .base.cra_priority = 500, .base.cra_blocksize = SERPENT_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct serpent_ctx), .base.cra_module = THIS_MODULE, .min_keysize = SERPENT_MIN_KEY_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE, .ivsize = SERPENT_BLOCK_SIZE, .setkey = serpent_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }; static int __init serpent_init(void) { const char *feature_name; if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &feature_name)) { pr_info("CPU feature '%s' is not supported.\n", feature_name); return -ENODEV; } return crypto_register_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs)); } static void
__exit serpent_exit(void) { crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs)); } module_init(serpent_init); module_exit(serpent_exit); MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("serpent");
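The ECB_WALK_START()/ECB_BLOCK()/ECB_WALK_END() macros used above come from ecb_cbc_helpers.h and also take care of the skcipher walk and the kernel_fpu_begin()/kernel_fpu_end() bracketing around the AVX code. Stripped of that, the dispatch they implement reduces to the pattern sketched here (a simplification; process8 and process1 stand in for serpent_ecb_enc_8way_avx() and __serpent_encrypt()):

/* Sketch of the 8-way-then-single-block ECB dispatch (not the real macros). */
#include <stddef.h>

#define BLOCK_SIZE 16 /* SERPENT_BLOCK_SIZE */

typedef void (*blk_fn)(const void *ctx, unsigned char *dst, const unsigned char *src);

static void ecb_walk(const void *ctx, unsigned char *dst,
		     const unsigned char *src, size_t nbytes,
		     blk_fn process8, blk_fn process1)
{
	while (nbytes >= 8 * BLOCK_SIZE) { /* ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, ...) */
		process8(ctx, dst, src);
		src += 8 * BLOCK_SIZE;
		dst += 8 * BLOCK_SIZE;
		nbytes -= 8 * BLOCK_SIZE;
	}
	while (nbytes >= BLOCK_SIZE) {     /* ECB_BLOCK(1, ...) mop-up */
		process1(ctx, dst, src);
		src += BLOCK_SIZE;
		dst += BLOCK_SIZE;
		nbytes -= BLOCK_SIZE;
	}
}

CBC decryption follows the same shape with serpent_cbc_dec_8way_avx(), while CBC encryption is inherently serial (each block chains on the previous ciphertext), which is why cbc_encrypt() above uses only the one-block __serpent_encrypt().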
[per-line coverage counts and line-number index omitted]
// SPDX-License-Identifier: GPL-2.0 /* * security/tomoyo/gc.c * * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include "common.h" #include <linux/kthread.h> #include <linux/slab.h> /** * tomoyo_memory_free - Free memory for elements. * * @ptr: Pointer to allocated memory. * * Returns nothing. * * Caller holds tomoyo_policy_lock mutex. */ static inline void tomoyo_memory_free(void *ptr) { tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr); kfree(ptr); } /* The list for "struct tomoyo_io_buffer". */ static LIST_HEAD(tomoyo_io_buffer_list); /* Lock for protecting tomoyo_io_buffer_list. */ static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock); /** * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not. * * @element: Pointer to "struct list_head". * * Returns true if @element is used by /sys/kernel/security/tomoyo/ users, * false otherwise.
*/ static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element) { struct tomoyo_io_buffer *head; bool in_use = false; spin_lock(&tomoyo_io_buffer_list_lock); list_for_each_entry(head, &tomoyo_io_buffer_list, list) { head->users++; spin_unlock(&tomoyo_io_buffer_list_lock); mutex_lock(&head->io_sem); if (head->r.domain == element || head->r.group == element || head->r.acl == element || &head->w.domain->list == element) in_use = true; mutex_unlock(&head->io_sem); spin_lock(&tomoyo_io_buffer_list_lock); head->users--; if (in_use) break; } spin_unlock(&tomoyo_io_buffer_list_lock); return in_use; } /** * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not. * * @string: String to check. * * Returns true if @string is used by /sys/kernel/security/tomoyo/ users, * false otherwise. */ static bool tomoyo_name_used_by_io_buffer(const char *string) { struct tomoyo_io_buffer *head; const size_t size = strlen(string) + 1; bool in_use = false; spin_lock(&tomoyo_io_buffer_list_lock); list_for_each_entry(head, &tomoyo_io_buffer_list, list) { int i; head->users++; spin_unlock(&tomoyo_io_buffer_list_lock); mutex_lock(&head->io_sem); for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) { const char *w = head->r.w[i]; if (w < string || w > string + size) continue; in_use = true; break; } mutex_unlock(&head->io_sem); spin_lock(&tomoyo_io_buffer_list_lock); head->users--; if (in_use) break; } spin_unlock(&tomoyo_io_buffer_list_lock); return in_use; } /** * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control". * * @element: Pointer to "struct list_head". * * Returns nothing. */ static inline void tomoyo_del_transition_control(struct list_head *element) { struct tomoyo_transition_control *ptr = container_of(element, typeof(*ptr), head.list); tomoyo_put_name(ptr->domainname); tomoyo_put_name(ptr->program); } /** * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator". * * @element: Pointer to "struct list_head". * * Returns nothing. */ static inline void tomoyo_del_aggregator(struct list_head *element) { struct tomoyo_aggregator *ptr = container_of(element, typeof(*ptr), head.list); tomoyo_put_name(ptr->original_name); tomoyo_put_name(ptr->aggregated_name); } /** * tomoyo_del_manager - Delete members in "struct tomoyo_manager". * * @element: Pointer to "struct list_head". * * Returns nothing. */ static inline void tomoyo_del_manager(struct list_head *element) { struct tomoyo_manager *ptr = container_of(element, typeof(*ptr), head.list); tomoyo_put_name(ptr->manager); } /** * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info". * * @element: Pointer to "struct list_head". * * Returns nothing. 
*/ static void tomoyo_del_acl(struct list_head *element) { struct tomoyo_acl_info *acl = container_of(element, typeof(*acl), list); tomoyo_put_condition(acl->cond); switch (acl->type) { case TOMOYO_TYPE_PATH_ACL: { struct tomoyo_path_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_name_union(&entry->name); } break; case TOMOYO_TYPE_PATH2_ACL: { struct tomoyo_path2_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_name_union(&entry->name1); tomoyo_put_name_union(&entry->name2); } break; case TOMOYO_TYPE_PATH_NUMBER_ACL: { struct tomoyo_path_number_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_name_union(&entry->name); tomoyo_put_number_union(&entry->number); } break; case TOMOYO_TYPE_MKDEV_ACL: { struct tomoyo_mkdev_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_name_union(&entry->name); tomoyo_put_number_union(&entry->mode); tomoyo_put_number_union(&entry->major); tomoyo_put_number_union(&entry->minor); } break; case TOMOYO_TYPE_MOUNT_ACL: { struct tomoyo_mount_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_name_union(&entry->dev_name); tomoyo_put_name_union(&entry->dir_name); tomoyo_put_name_union(&entry->fs_type); tomoyo_put_number_union(&entry->flags); } break; case TOMOYO_TYPE_ENV_ACL: { struct tomoyo_env_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_name(entry->env); } break; case TOMOYO_TYPE_INET_ACL: { struct tomoyo_inet_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_group(entry->address.group); tomoyo_put_number_union(&entry->port); } break; case TOMOYO_TYPE_UNIX_ACL: { struct tomoyo_unix_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_name_union(&entry->name); } break; case TOMOYO_TYPE_MANUAL_TASK_ACL: { struct tomoyo_task_acl *entry = container_of(acl, typeof(*entry), head); tomoyo_put_name(entry->domainname); } break; } } /** * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info". * * @element: Pointer to "struct list_head". * * Returns nothing. * * Caller holds tomoyo_policy_lock mutex. */ static inline void tomoyo_del_domain(struct list_head *element) { struct tomoyo_domain_info *domain = container_of(element, typeof(*domain), list); struct tomoyo_acl_info *acl; struct tomoyo_acl_info *tmp; /* * Since this domain is referenced from neither * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete * elements without checking for is_deleted flag. */ list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) { tomoyo_del_acl(&acl->list); tomoyo_memory_free(acl); } tomoyo_put_name(domain->domainname); } /** * tomoyo_del_condition - Delete members in "struct tomoyo_condition". * * @element: Pointer to "struct list_head". * * Returns nothing. 
*/ void tomoyo_del_condition(struct list_head *element) { struct tomoyo_condition *cond = container_of(element, typeof(*cond), head.list); const u16 condc = cond->condc; const u16 numbers_count = cond->numbers_count; const u16 names_count = cond->names_count; const u16 argc = cond->argc; const u16 envc = cond->envc; unsigned int i; const struct tomoyo_condition_element *condp = (const struct tomoyo_condition_element *) (cond + 1); struct tomoyo_number_union *numbers_p = (struct tomoyo_number_union *) (condp + condc); struct tomoyo_name_union *names_p = (struct tomoyo_name_union *) (numbers_p + numbers_count); const struct tomoyo_argv *argv = (const struct tomoyo_argv *) (names_p + names_count); const struct tomoyo_envp *envp = (const struct tomoyo_envp *) (argv + argc); for (i = 0; i < numbers_count; i++) tomoyo_put_number_union(numbers_p++); for (i = 0; i < names_count; i++) tomoyo_put_name_union(names_p++); for (i = 0; i < argc; argv++, i++) tomoyo_put_name(argv->value); for (i = 0; i < envc; envp++, i++) { tomoyo_put_name(envp->name); tomoyo_put_name(envp->value); } } /** * tomoyo_del_name - Delete members in "struct tomoyo_name". * * @element: Pointer to "struct list_head". * * Returns nothing. */ static inline void tomoyo_del_name(struct list_head *element) { /* Nothing to do. */ } /** * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group". * * @element: Pointer to "struct list_head". * * Returns nothing. */ static inline void tomoyo_del_path_group(struct list_head *element) { struct tomoyo_path_group *member = container_of(element, typeof(*member), head.list); tomoyo_put_name(member->member_name); } /** * tomoyo_del_group - Delete "struct tomoyo_group". * * @element: Pointer to "struct list_head". * * Returns nothing. */ static inline void tomoyo_del_group(struct list_head *element) { struct tomoyo_group *group = container_of(element, typeof(*group), head.list); tomoyo_put_name(group->group_name); } /** * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group". * * @element: Pointer to "struct list_head". * * Returns nothing. */ static inline void tomoyo_del_address_group(struct list_head *element) { /* Nothing to do. */ } /** * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group". * * @element: Pointer to "struct list_head". * * Returns nothing. */ static inline void tomoyo_del_number_group(struct list_head *element) { /* Nothing to do. */ } /** * tomoyo_try_to_gc - Try to kfree() an entry. * * @type: One of values in "enum tomoyo_policy_id". * @element: Pointer to "struct list_head". * * Returns nothing. * * Caller holds tomoyo_policy_lock mutex. */ static void tomoyo_try_to_gc(const enum tomoyo_policy_id type, struct list_head *element) { /* * __list_del_entry() guarantees that the list element became no longer * reachable from the list which the element was originally on (e.g. * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the * list element became no longer referenced by syscall users. */ __list_del_entry(element); mutex_unlock(&tomoyo_policy_lock); synchronize_srcu(&tomoyo_ss); /* * However, there are two users which may still be using the list * element. We need to defer until both users forget this element. * * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl} * and "struct tomoyo_io_buffer"->w.domain forget this element. 
*/ if (tomoyo_struct_used_by_io_buffer(element)) goto reinject; switch (type) { case TOMOYO_ID_TRANSITION_CONTROL: tomoyo_del_transition_control(element); break; case TOMOYO_ID_MANAGER: tomoyo_del_manager(element); break; case TOMOYO_ID_AGGREGATOR: tomoyo_del_aggregator(element); break; case TOMOYO_ID_GROUP: tomoyo_del_group(element); break; case TOMOYO_ID_PATH_GROUP: tomoyo_del_path_group(element); break; case TOMOYO_ID_ADDRESS_GROUP: tomoyo_del_address_group(element); break; case TOMOYO_ID_NUMBER_GROUP: tomoyo_del_number_group(element); break; case TOMOYO_ID_CONDITION: tomoyo_del_condition(element); break; case TOMOYO_ID_NAME: /* * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[] * forget this element. */ if (tomoyo_name_used_by_io_buffer (container_of(element, typeof(struct tomoyo_name), head.list)->entry.name)) goto reinject; tomoyo_del_name(element); break; case TOMOYO_ID_ACL: tomoyo_del_acl(element); break; case TOMOYO_ID_DOMAIN: /* * Don't kfree() until all "struct cred"->security forget this * element. */ if (atomic_read(&container_of (element, typeof(struct tomoyo_domain_info), list)->users)) goto reinject; break; case TOMOYO_MAX_POLICY: break; } mutex_lock(&tomoyo_policy_lock); if (type == TOMOYO_ID_DOMAIN) tomoyo_del_domain(element); tomoyo_memory_free(element); return; reinject: /* * We can safely reinject this element here because * (1) Appending list elements and removing list elements are protected * by tomoyo_policy_lock mutex. * (2) Only this function removes list elements and this function is * exclusively executed by tomoyo_gc_mutex mutex. * are true. */ mutex_lock(&tomoyo_policy_lock); list_add_rcu(element, element->prev); } /** * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head". * * @id: One of values in "enum tomoyo_policy_id". * @member_list: Pointer to "struct list_head". * * Returns nothing. */ static void tomoyo_collect_member(const enum tomoyo_policy_id id, struct list_head *member_list) { struct tomoyo_acl_head *member; struct tomoyo_acl_head *tmp; list_for_each_entry_safe(member, tmp, member_list, list) { if (!member->is_deleted) continue; member->is_deleted = TOMOYO_GC_IN_PROGRESS; tomoyo_try_to_gc(id, &member->list); } } /** * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info". * * @list: Pointer to "struct list_head". * * Returns nothing. */ static void tomoyo_collect_acl(struct list_head *list) { struct tomoyo_acl_info *acl; struct tomoyo_acl_info *tmp; list_for_each_entry_safe(acl, tmp, list, list) { if (!acl->is_deleted) continue; acl->is_deleted = TOMOYO_GC_IN_PROGRESS; tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list); } } /** * tomoyo_collect_entry - Try to kfree() deleted elements. * * Returns nothing. 
*/ static void tomoyo_collect_entry(void) { int i; enum tomoyo_policy_id id; struct tomoyo_policy_namespace *ns; mutex_lock(&tomoyo_policy_lock); { struct tomoyo_domain_info *domain; struct tomoyo_domain_info *tmp; list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list, list) { tomoyo_collect_acl(&domain->acl_info_list); if (!domain->is_deleted || atomic_read(&domain->users)) continue; tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list); } } list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { for (id = 0; id < TOMOYO_MAX_POLICY; id++) tomoyo_collect_member(id, &ns->policy_list[id]); for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++) tomoyo_collect_acl(&ns->acl_group[i]); } { struct tomoyo_shared_acl_head *ptr; struct tomoyo_shared_acl_head *tmp; list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list, list) { if (atomic_read(&ptr->users) > 0) continue; atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS); tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list); } } list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { for (i = 0; i < TOMOYO_MAX_GROUP; i++) { struct list_head *list = &ns->group_list[i]; struct tomoyo_group *group; struct tomoyo_group *tmp; switch (i) { case 0: id = TOMOYO_ID_PATH_GROUP; break; case 1: id = TOMOYO_ID_NUMBER_GROUP; break; default: id = TOMOYO_ID_ADDRESS_GROUP; break; } list_for_each_entry_safe(group, tmp, list, head.list) { tomoyo_collect_member(id, &group->member_list); if (!list_empty(&group->member_list) || atomic_read(&group->head.users) > 0) continue; atomic_set(&group->head.users, TOMOYO_GC_IN_PROGRESS); tomoyo_try_to_gc(TOMOYO_ID_GROUP, &group->head.list); } } } for (i = 0; i < TOMOYO_MAX_HASH; i++) { struct list_head *list = &tomoyo_name_list[i]; struct tomoyo_shared_acl_head *ptr; struct tomoyo_shared_acl_head *tmp; list_for_each_entry_safe(ptr, tmp, list, list) { if (atomic_read(&ptr->users) > 0) continue; atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS); tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list); } } mutex_unlock(&tomoyo_policy_lock); } /** * tomoyo_gc_thread - Garbage collector thread function. * * @unused: Unused. * * Returns 0. */ static int tomoyo_gc_thread(void *unused) { /* Garbage collector thread is exclusive. */ static DEFINE_MUTEX(tomoyo_gc_mutex); if (!mutex_trylock(&tomoyo_gc_mutex)) goto out; tomoyo_collect_entry(); { struct tomoyo_io_buffer *head; struct tomoyo_io_buffer *tmp; spin_lock(&tomoyo_io_buffer_list_lock); list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list, list) { if (head->users) continue; list_del(&head->list); kfree(head->read_buf); kfree(head->write_buf); kfree(head); } spin_unlock(&tomoyo_io_buffer_list_lock); } mutex_unlock(&tomoyo_gc_mutex); out: /* This acts as do_exit(0). */ return 0; } /** * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users. * * @head: Pointer to "struct tomoyo_io_buffer". * @is_register: True if register, false if unregister. * * Returns nothing. */ void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register) { bool is_write = false; spin_lock(&tomoyo_io_buffer_list_lock); if (is_register) { head->users = 1; list_add(&head->list, &tomoyo_io_buffer_list); } else { is_write = head->write_buf != NULL; if (!--head->users) { list_del(&head->list); kfree(head->read_buf); kfree(head->write_buf); kfree(head); } } spin_unlock(&tomoyo_io_buffer_list_lock); if (is_write) kthread_run(tomoyo_gc_thread, NULL, "GC for TOMOYO"); }
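tomoyo_try_to_gc() above is a general SRCU deferral pattern: unlink the element under the writer mutex, wait out an SRCU grace period so lockless readers are finished, then free it, unless a long-lived user (here, an open /sys/kernel/security/tomoyo/ file) still holds it, in which case the element is reinjected exactly where it was for a later pass. A generic skeleton of that pattern, with entry, writer_lock, reader_srcu and still_in_use() as illustrative stand-ins:

/* Generic sketch of the unlink -> synchronize_srcu -> free-or-reinject pattern. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct entry { struct list_head list; bool is_deleted; };

static DEFINE_MUTEX(writer_lock); /* plays the role of tomoyo_policy_lock */
DEFINE_STATIC_SRCU(reader_srcu);  /* plays the role of tomoyo_ss */

static bool still_in_use(struct entry *e)
{
	return false; /* stand-in for the io_buffer / ->users checks */
}

static void try_to_gc(struct entry *e)
{
	/* Caller holds writer_lock and has already set e->is_deleted. */
	__list_del_entry(&e->list);     /* new readers can no longer find it */
	mutex_unlock(&writer_lock);
	synchronize_srcu(&reader_srcu); /* existing SRCU readers have left */
	if (still_in_use(e)) {
		mutex_lock(&writer_lock);
		/* ->prev/->next still point at the old neighbours, so this
		 * reinjects the element at its original position.
		 */
		list_add_rcu(&e->list, e->list.prev);
		return;
	}
	mutex_lock(&writer_lock);
	kfree(e);
}

Note the property the TOMOYO code relies on: __list_del_entry() does not poison the element's own prev/next pointers, so list_add_rcu(element, element->prev) can put the element back where it came from.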
[per-line coverage counts and line-number index omitted]
// SPDX-License-Identifier: GPL-2.0-only /* * event_inode.c - part of tracefs, a pseudo file system for activating tracing * * Copyright (C) 2020-23 VMware Inc, author: Steven Rostedt <rostedt@goodmis.org> * Copyright (C) 2020-23 VMware Inc, author: Ajay Kaher <akaher@vmware.com> * Copyright (C) 2023 Google, author: Steven Rostedt <rostedt@goodmis.org> * * eventfs is used to dynamically create inodes and dentries based on the * metadata provided by the tracing system. * * eventfs stores the metadata of files/dirs and holds off on creating * inodes/dentries of the files. When accessed, eventfs will create the * inodes/dentries in a just-in-time (JIT) manner. eventfs will clean up * and delete the inodes/dentries when they are no longer referenced. */ #include <linux/fsnotify.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/workqueue.h> #include <linux/security.h> #include <linux/tracefs.h> #include <linux/kref.h> #include <linux/delay.h> #include "internal.h" /* * eventfs_mutex protects the eventfs_inode (ei) dentry. Any access * to the ei->dentry must be done under this mutex and after checking * if ei->is_freed is not set. When ei->is_freed is set, the dentry * is on its way to being freed after the last dput() is made on it. */ static DEFINE_MUTEX(eventfs_mutex); /* Choose something "unique" ;-) */ #define EVENTFS_FILE_INODE_INO 0x12c4e37 struct eventfs_root_inode { struct eventfs_inode ei; struct dentry *events_dir; }; static struct eventfs_root_inode *get_root_inode(struct eventfs_inode *ei) { WARN_ON_ONCE(!ei->is_events); return container_of(ei, struct eventfs_root_inode, ei); } /* Just try to make something consistent and unique */ static int eventfs_dir_ino(struct eventfs_inode *ei) { if (!ei->ino) { ei->ino = get_next_ino(); /* Must not have the file inode number */ if (ei->ino == EVENTFS_FILE_INODE_INO) ei->ino = get_next_ino(); } return ei->ino; } /* * The eventfs_inode (ei) itself is protected by SRCU. It is released from * its parent's list and will have is_freed set (under eventfs_mutex). * After the SRCU grace period is over and the last dput() is called * the ei is freed. */ DEFINE_STATIC_SRCU(eventfs_srcu); /* Mode is unsigned short, use the upper bits for flags */ enum { EVENTFS_SAVE_MODE = BIT(16), EVENTFS_SAVE_UID = BIT(17), EVENTFS_SAVE_GID = BIT(18), }; #define EVENTFS_MODE_MASK (EVENTFS_SAVE_MODE - 1) static void free_ei_rcu(struct rcu_head *rcu) { struct eventfs_inode *ei = container_of(rcu, struct eventfs_inode, rcu); struct eventfs_root_inode *rei; kfree(ei->entry_attrs); kfree_const(ei->name); if (ei->is_events) { rei = get_root_inode(ei); kfree(rei); } else { kfree(ei); } } /* * eventfs_inode reference count management. * * NOTE! We count only references from dentries, in the * form 'dentry->d_fsdata'. There are also references from * directory inodes ('ti->private'), but the dentry reference * count is always a superset of the inode reference count.
*/ static void release_ei(struct kref *ref) { struct eventfs_inode *ei = container_of(ref, struct eventfs_inode, kref); const struct eventfs_entry *entry; WARN_ON_ONCE(!ei->is_freed); for (int i = 0; i < ei->nr_entries; i++) { entry = &ei->entries[i]; if (entry->release) entry->release(entry->name, ei->data); } call_srcu(&eventfs_srcu, &ei->rcu, free_ei_rcu); } static inline void put_ei(struct eventfs_inode *ei) { if (ei) kref_put(&ei->kref, release_ei); } static inline void free_ei(struct eventfs_inode *ei) { if (ei) { ei->is_freed = 1; put_ei(ei); } } /* * Called when creation of an ei fails, do not call release() functions. */ static inline void cleanup_ei(struct eventfs_inode *ei) { if (ei) { /* Set nr_entries to 0 to prevent release() function being called */ ei->nr_entries = 0; free_ei(ei); } } static inline struct eventfs_inode *get_ei(struct eventfs_inode *ei) { if (ei) kref_get(&ei->kref); return ei; } static struct dentry *eventfs_root_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); static int eventfs_iterate(struct file *file, struct dir_context *ctx); static void update_attr(struct eventfs_attr *attr, struct iattr *iattr) { unsigned int ia_valid = iattr->ia_valid; if (ia_valid & ATTR_MODE) { attr->mode = (attr->mode & ~EVENTFS_MODE_MASK) | (iattr->ia_mode & EVENTFS_MODE_MASK) | EVENTFS_SAVE_MODE; } if (ia_valid & ATTR_UID) { attr->mode |= EVENTFS_SAVE_UID; attr->uid = iattr->ia_uid; } if (ia_valid & ATTR_GID) { attr->mode |= EVENTFS_SAVE_GID; attr->gid = iattr->ia_gid; } } static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { const struct eventfs_entry *entry; struct eventfs_inode *ei; const char *name; int ret; mutex_lock(&eventfs_mutex); ei = dentry->d_fsdata; if (ei->is_freed) { /* Do not allow changes if the event is about to be removed. */ mutex_unlock(&eventfs_mutex); return -ENODEV; } /* Preallocate the children mode array if necessary */ if (!(dentry->d_inode->i_mode & S_IFDIR)) { if (!ei->entry_attrs) { ei->entry_attrs = kcalloc(ei->nr_entries, sizeof(*ei->entry_attrs), GFP_NOFS); if (!ei->entry_attrs) { ret = -ENOMEM; goto out; } } } ret = simple_setattr(idmap, dentry, iattr); if (ret < 0) goto out; /* * If this is a dir, then update the ei cache, only the file * mode is saved in the ei->m_children, and the ownership is * determined by the parent directory. 
*/ if (dentry->d_inode->i_mode & S_IFDIR) { /* Just use the inode permissions for the events directory */ if (!ei->is_events) update_attr(&ei->attr, iattr); } else { name = dentry->d_name.name; for (int i = 0; i < ei->nr_entries; i++) { entry = &ei->entries[i]; if (strcmp(name, entry->name) == 0) { update_attr(&ei->entry_attrs[i], iattr); break; } } } out: mutex_unlock(&eventfs_mutex); return ret; } static const struct inode_operations eventfs_dir_inode_operations = { .lookup = eventfs_root_lookup, .setattr = eventfs_set_attr, }; static const struct inode_operations eventfs_file_inode_operations = { .setattr = eventfs_set_attr, }; static const struct file_operations eventfs_file_operations = { .read = generic_read_dir, .iterate_shared = eventfs_iterate, .llseek = generic_file_llseek, }; static void eventfs_set_attrs(struct eventfs_inode *ei, bool update_uid, kuid_t uid, bool update_gid, kgid_t gid, int level) { struct eventfs_inode *ei_child; /* Update events/<system>/<event> */ if (WARN_ON_ONCE(level > 3)) return; if (update_uid) { ei->attr.mode &= ~EVENTFS_SAVE_UID; ei->attr.uid = uid; } if (update_gid) { ei->attr.mode &= ~EVENTFS_SAVE_GID; ei->attr.gid = gid; } list_for_each_entry(ei_child, &ei->children, list) { eventfs_set_attrs(ei_child, update_uid, uid, update_gid, gid, level + 1); } if (!ei->entry_attrs) return; for (int i = 0; i < ei->nr_entries; i++) { if (update_uid) { ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_UID; ei->entry_attrs[i].uid = uid; } if (update_gid) { ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_GID; ei->entry_attrs[i].gid = gid; } } } /* * On a remount of tracefs, if UID or GID options are set, then * the mount point inode permissions should be used. * Reset the saved permission flags appropriately. */ void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid) { struct eventfs_inode *ei = ti->private; /* Only the events directory does the updates */ if (!ei || !ei->is_events || ei->is_freed) return; eventfs_set_attrs(ei, update_uid, ti->vfs_inode.i_uid, update_gid, ti->vfs_inode.i_gid, 0); } static void update_inode_attr(struct inode *inode, umode_t mode, struct eventfs_attr *attr, struct eventfs_root_inode *rei) { if (attr && attr->mode & EVENTFS_SAVE_MODE) inode->i_mode = attr->mode & EVENTFS_MODE_MASK; else inode->i_mode = mode; if (attr && attr->mode & EVENTFS_SAVE_UID) inode->i_uid = attr->uid; else inode->i_uid = rei->ei.attr.uid; if (attr && attr->mode & EVENTFS_SAVE_GID) inode->i_gid = attr->gid; else inode->i_gid = rei->ei.attr.gid; } static struct inode *eventfs_get_inode(struct dentry *dentry, struct eventfs_attr *attr, umode_t mode, struct eventfs_inode *ei) { struct eventfs_root_inode *rei; struct eventfs_inode *pei; struct tracefs_inode *ti; struct inode *inode; inode = tracefs_get_inode(dentry->d_sb); if (!inode) return NULL; ti = get_tracefs(inode); ti->private = ei; ti->flags |= TRACEFS_EVENT_INODE; /* Find the top dentry that holds the "events" directory */ do { dentry = dentry->d_parent; /* Directories always have d_fsdata */ pei = dentry->d_fsdata; } while (!pei->is_events); rei = get_root_inode(pei); update_inode_attr(inode, mode, attr, rei); return inode; } /** * lookup_file - look up a file in the tracefs filesystem * @parent_ei: Pointer to the eventfs_inode that represents parent of the file * @dentry: the dentry to look up * @mode: the permission that the file should have. * @attr: saved attributes changed by user * @data: something that the caller will want to get to later on. 
* @fop: struct file_operations that should be used for this file. * * This function creates a dentry that represents a file in the eventsfs_inode * directory. The inode.i_private pointer will point to @data in the open() * call. */ static struct dentry *lookup_file(struct eventfs_inode *parent_ei, struct dentry *dentry, umode_t mode, struct eventfs_attr *attr, void *data, const struct file_operations *fop) { struct inode *inode; if (!(mode & S_IFMT)) mode |= S_IFREG; if (WARN_ON_ONCE(!S_ISREG(mode))) return ERR_PTR(-EIO); /* Only directories have ti->private set to an ei, not files */ inode = eventfs_get_inode(dentry, attr, mode, NULL); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); inode->i_op = &eventfs_file_inode_operations; inode->i_fop = fop; inode->i_private = data; /* All files will have the same inode number */ inode->i_ino = EVENTFS_FILE_INODE_INO; // Files have their parent's ei as their fsdata dentry->d_fsdata = get_ei(parent_ei); d_add(dentry, inode); return NULL; }; /** * lookup_dir_entry - look up a dir in the tracefs filesystem * @dentry: the directory to look up * @pei: Pointer to the parent eventfs_inode if available * @ei: the eventfs_inode that represents the directory to create * * This function will look up a dentry for a directory represented by * a eventfs_inode. */ static struct dentry *lookup_dir_entry(struct dentry *dentry, struct eventfs_inode *pei, struct eventfs_inode *ei) { struct inode *inode; umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode = eventfs_get_inode(dentry, &ei->attr, mode, ei); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); inode->i_op = &eventfs_dir_inode_operations; inode->i_fop = &eventfs_file_operations; /* All directories will have the same inode number */ inode->i_ino = eventfs_dir_ino(ei); dentry->d_fsdata = get_ei(ei); d_add(dentry, inode); return NULL; } static inline struct eventfs_inode *init_ei(struct eventfs_inode *ei, const char *name) { ei->name = kstrdup_const(name, GFP_KERNEL); if (!ei->name) return NULL; kref_init(&ei->kref); return ei; } static inline struct eventfs_inode *alloc_ei(const char *name) { struct eventfs_inode *ei = kzalloc(sizeof(*ei), GFP_KERNEL); struct eventfs_inode *result; if (!ei) return NULL; result = init_ei(ei, name); if (!result) kfree(ei); return result; } static inline struct eventfs_inode *alloc_root_ei(const char *name) { struct eventfs_root_inode *rei = kzalloc(sizeof(*rei), GFP_KERNEL); struct eventfs_inode *ei; if (!rei) return NULL; rei->ei.is_events = 1; ei = init_ei(&rei->ei, name); if (!ei) kfree(rei); return ei; } /** * eventfs_d_release - dentry is going away * @dentry: dentry which has the reference to remove. * * Remove the association between a dentry from an eventfs_inode. */ void eventfs_d_release(struct dentry *dentry) { put_ei(dentry->d_fsdata); } /** * lookup_file_dentry - create a dentry for a file of an eventfs_inode * @dentry: The parent dentry under which the new file's dentry will be created * @ei: the eventfs_inode that the file will be created under * @idx: the index into the entry_attrs[] of the @ei * @mode: The mode of the file. * @data: The data to use to set the inode of the file with on open() * @fops: The fops of the file to be created. * * This function creates a dentry for a file associated with an * eventfs_inode @ei. It uses the entry attributes specified by @idx, * if available. The file will have the specified @mode and its inode will be * set up with @data upon open. The file operations will be set to @fops. 
* * Return: Returns a pointer to the newly created file's dentry or an error * pointer. */ static struct dentry * lookup_file_dentry(struct dentry *dentry, struct eventfs_inode *ei, int idx, umode_t mode, void *data, const struct file_operations *fops) { struct eventfs_attr *attr = NULL; if (ei->entry_attrs) attr = &ei->entry_attrs[idx]; return lookup_file(ei, dentry, mode, attr, data, fops); } /** * eventfs_root_lookup - lookup routine to create file/dir * @dir: in which a lookup is being done * @dentry: file/dir dentry * @flags: Just passed to simple_lookup() * * Used to create dynamic file/dir with-in @dir, search with-in @ei * list, if @dentry found go ahead and create the file/dir */ static struct dentry *eventfs_root_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct eventfs_inode *ei_child; struct tracefs_inode *ti; struct eventfs_inode *ei; const char *name = dentry->d_name.name; struct dentry *result = NULL; ti = get_tracefs(dir); if (WARN_ON_ONCE(!(ti->flags & TRACEFS_EVENT_INODE))) return ERR_PTR(-EIO); mutex_lock(&eventfs_mutex); ei = ti->private; if (!ei || ei->is_freed) goto out; list_for_each_entry(ei_child, &ei->children, list) { if (strcmp(ei_child->name, name) != 0) continue; /* A child is freed and removed from the list at the same time */ if (WARN_ON_ONCE(ei_child->is_freed)) goto out; result = lookup_dir_entry(dentry, ei, ei_child); goto out; } for (int i = 0; i < ei->nr_entries; i++) { void *data; umode_t mode; const struct file_operations *fops; const struct eventfs_entry *entry = &ei->entries[i]; if (strcmp(name, entry->name) != 0) continue; data = ei->data; if (entry->callback(name, &mode, &data, &fops) <= 0) goto out; result = lookup_file_dentry(dentry, ei, i, mode, data, fops); goto out; } out: mutex_unlock(&eventfs_mutex); return result; } /* * Walk the children of a eventfs_inode to fill in getdents(). */ static int eventfs_iterate(struct file *file, struct dir_context *ctx) { const struct file_operations *fops; struct inode *f_inode = file_inode(file); const struct eventfs_entry *entry; struct eventfs_inode *ei_child; struct tracefs_inode *ti; struct eventfs_inode *ei; const char *name; umode_t mode; int idx; int ret = -EINVAL; int ino; int i, r, c; if (!dir_emit_dots(file, ctx)) return 0; ti = get_tracefs(f_inode); if (!(ti->flags & TRACEFS_EVENT_INODE)) return -EINVAL; c = ctx->pos - 2; idx = srcu_read_lock(&eventfs_srcu); mutex_lock(&eventfs_mutex); ei = READ_ONCE(ti->private); if (ei && ei->is_freed) ei = NULL; mutex_unlock(&eventfs_mutex); if (!ei) goto out; /* * Need to create the dentries and inodes to have a consistent * inode number. 
*/ ret = 0; /* Start at 'c' to jump over already read entries */ for (i = c; i < ei->nr_entries; i++, ctx->pos++) { void *cdata = ei->data; entry = &ei->entries[i]; name = entry->name; mutex_lock(&eventfs_mutex); /* If ei->is_freed then just bail here, nothing more to do */ if (ei->is_freed) { mutex_unlock(&eventfs_mutex); goto out; } r = entry->callback(name, &mode, &cdata, &fops); mutex_unlock(&eventfs_mutex); if (r <= 0) continue; ino = EVENTFS_FILE_INODE_INO; if (!dir_emit(ctx, name, strlen(name), ino, DT_REG)) goto out; } /* Subtract the skipped entries above */ c -= min((unsigned int)c, (unsigned int)ei->nr_entries); list_for_each_entry_srcu(ei_child, &ei->children, list, srcu_read_lock_held(&eventfs_srcu)) { if (c > 0) { c--; continue; } ctx->pos++; if (ei_child->is_freed) continue; name = ei_child->name; ino = eventfs_dir_ino(ei_child); if (!dir_emit(ctx, name, strlen(name), ino, DT_DIR)) goto out_dec; } ret = 1; out: srcu_read_unlock(&eventfs_srcu, idx); return ret; out_dec: /* Incremented ctx->pos without adding something, reset it */ ctx->pos--; goto out; } /** * eventfs_create_dir - Create the eventfs_inode for this directory * @name: The name of the directory to create. * @parent: The eventfs_inode of the parent directory. * @entries: A list of entries that represent the files under this directory * @size: The number of @entries * @data: The default data to pass to the files (an entry may override it). * * This function creates the descriptor to represent a directory in the * eventfs. This descriptor is an eventfs_inode, and it is returned to be * used to create other children underneath. * * The @entries is an array of eventfs_entry structures which has: * const char *name * eventfs_callback callback; * * The name is the name of the file, and the callback is a pointer to a function * that will be called when the file is reference (either by lookup or by * reading a directory). The callback is of the prototype: * * int callback(const char *name, umode_t *mode, void **data, * const struct file_operations **fops); * * When a file needs to be created, this callback will be called with * name = the name of the file being created (so that the same callback * may be used for multiple files). * mode = a place to set the file's mode * data = A pointer to @data, and the callback may replace it, which will * cause the file created to pass the new data to the open() call. * fops = the fops to use for the created file. * * NB. @callback is called while holding internal locks of the eventfs * system. The callback must not call any code that might also call into * the tracefs or eventfs system or it will risk creating a deadlock. */ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent, const struct eventfs_entry *entries, int size, void *data) { struct eventfs_inode *ei; if (!parent) return ERR_PTR(-EINVAL); ei = alloc_ei(name); if (!ei) return ERR_PTR(-ENOMEM); ei->entries = entries; ei->nr_entries = size; ei->data = data; INIT_LIST_HEAD(&ei->children); INIT_LIST_HEAD(&ei->list); mutex_lock(&eventfs_mutex); if (!parent->is_freed) list_add_tail(&ei->list, &parent->children); mutex_unlock(&eventfs_mutex); /* Was the parent freed? */ if (list_empty(&ei->list)) { cleanup_ei(ei); ei = ERR_PTR(-EBUSY); } return ei; } /** * eventfs_create_events_dir - create the top level events directory * @name: The name of the top level directory to create. * @parent: Parent dentry for this file in the tracefs directory. 
* @entries: A list of entries that represent the files under this directory * @size: The number of @entries * @data: The default data to pass to the files (an entry may override it). * * This function creates the top of the trace event directory. * * See eventfs_create_dir() for use of @entries. */ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent, const struct eventfs_entry *entries, int size, void *data) { struct dentry *dentry = tracefs_start_creating(name, parent); struct eventfs_root_inode *rei; struct eventfs_inode *ei; struct tracefs_inode *ti; struct inode *inode; kuid_t uid; kgid_t gid; if (security_locked_down(LOCKDOWN_TRACEFS)) return NULL; if (IS_ERR(dentry)) return ERR_CAST(dentry); ei = alloc_root_ei(name); if (!ei) goto fail; inode = tracefs_get_inode(dentry->d_sb); if (unlikely(!inode)) goto fail; // Note: we have a ref to the dentry from tracefs_start_creating() rei = get_root_inode(ei); rei->events_dir = dentry; ei->entries = entries; ei->nr_entries = size; ei->data = data; /* Save the ownership of this directory */ uid = d_inode(dentry->d_parent)->i_uid; gid = d_inode(dentry->d_parent)->i_gid; /* * The ei->attr will be used as the default values for the * files beneath this directory. */ ei->attr.uid = uid; ei->attr.gid = gid; INIT_LIST_HEAD(&ei->children); INIT_LIST_HEAD(&ei->list); ti = get_tracefs(inode); ti->flags |= TRACEFS_EVENT_INODE; ti->private = ei; inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode->i_uid = uid; inode->i_gid = gid; inode->i_op = &eventfs_dir_inode_operations; inode->i_fop = &eventfs_file_operations; dentry->d_fsdata = get_ei(ei); /* * Keep all eventfs directories with i_nlink == 1. * Due to the dynamic nature of the dentry creations and not * wanting to add a pointer to the parent eventfs_inode in the * eventfs_inode structure, keeping the i_nlink in sync with the * number of directories would cause too much complexity for * something not worth much. Keeping directory links at 1 * tells userspace not to trust the link number. */ d_instantiate(dentry, inode); /* The dentry of the "events" parent does keep track though */ inc_nlink(dentry->d_parent->d_inode); fsnotify_mkdir(dentry->d_parent->d_inode, dentry); tracefs_end_creating(dentry); return ei; fail: cleanup_ei(ei); tracefs_failed_creating(dentry); return ERR_PTR(-ENOMEM); } /** * eventfs_remove_rec - remove eventfs dir or file from list * @ei: eventfs_inode to be removed. * @level: prevent recursion from going more than 3 levels deep. * * This function recursively removes eventfs_inodes which * contains info of files and/or directories. */ static void eventfs_remove_rec(struct eventfs_inode *ei, int level) { struct eventfs_inode *ei_child; /* * Check recursion depth. It should never be greater than 3: * 0 - events/ * 1 - events/group/ * 2 - events/group/event/ * 3 - events/group/event/file */ if (WARN_ON_ONCE(level > 3)) return; /* search for nested folders or files */ list_for_each_entry(ei_child, &ei->children, list) eventfs_remove_rec(ei_child, level + 1); list_del_rcu(&ei->list); free_ei(ei); } /** * eventfs_remove_dir - remove eventfs dir or file from list * @ei: eventfs_inode to be removed. 
 *
 * This function acquires the eventfs_mutex lock and calls eventfs_remove_rec().
 */
void eventfs_remove_dir(struct eventfs_inode *ei)
{
	if (!ei)
		return;

	mutex_lock(&eventfs_mutex);
	eventfs_remove_rec(ei, 0);
	mutex_unlock(&eventfs_mutex);
}

/**
 * eventfs_remove_events_dir - remove the top level eventfs directory
 * @ei: the event_inode returned by eventfs_create_events_dir().
 *
 * This function removes the events main directory.
 */
void eventfs_remove_events_dir(struct eventfs_inode *ei)
{
	struct eventfs_root_inode *rei;
	struct dentry *dentry;

	rei = get_root_inode(ei);
	dentry = rei->events_dir;
	if (!dentry)
		return;

	rei->events_dir = NULL;
	eventfs_remove_dir(ei);

	/*
	 * Matches the dget() done by tracefs_start_creating()
	 * in eventfs_create_events_dir() when the dentry was
	 * created. In other words, it's a normal dentry that
	 * sticks around while the other ei->dentry are created
	 * and destroyed dynamically.
	 */
	d_invalidate(dentry);
	dput(dentry);
}
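/*
 * A minimal sketch (not part of the file above) of one plausible caller
 * of eventfs_create_dir(), following the entries/callback contract in
 * the kernel-doc: a callback returning > 0 asks eventfs to create the
 * file, 0 or less skips it. All "my_*" identifiers and my_enable_fops
 * are hypothetical; only struct eventfs_entry and the callback
 * prototype come from the code above.
 */
static const struct file_operations my_enable_fops;	/* assumed defined elsewhere */

static int my_callback(const char *name, umode_t *mode, void **data,
		       const struct file_operations **fops)
{
	if (strcmp(name, "enable") == 0) {
		*mode = 0644;
		*fops = &my_enable_fops;
		return 1;	/* create this file on lookup/readdir */
	}
	return 0;		/* anything else is not created */
}

static const struct eventfs_entry my_entries[] = {
	{ .name = "enable", .callback = my_callback },
};

static struct eventfs_inode *my_register(struct eventfs_inode *parent,
					 void *data)
{
	/* @data becomes the default passed back through my_callback() */
	return eventfs_create_dir("my_dir", parent, my_entries,
				  ARRAY_SIZE(my_entries), data);
}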
// SPDX-License-Identifier: GPL-2.0-only
/* iptables module for using new netfilter netlink queue
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>

#include <linux/netfilter.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_NFQUEUE.h>

#include <net/netfilter/nf_queue.h>

MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: packet forwarding to netlink");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_NFQUEUE");
MODULE_ALIAS("ip6t_NFQUEUE");
MODULE_ALIAS("arpt_NFQUEUE");

static u32 jhash_initval __read_mostly;

static unsigned int
nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_NFQ_info *tinfo = par->targinfo;

	return NF_QUEUE_NR(tinfo->queuenum);
}

static unsigned int
nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_NFQ_info_v1 *info = par->targinfo;
	u32 queue = info->queuenum;

	if (info->queues_total > 1) {
		queue = nfqueue_hash(skb, queue, info->queues_total,
				     xt_family(par), jhash_initval);
	}
	return NF_QUEUE_NR(queue);
}

static unsigned int
nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_NFQ_info_v2 *info = par->targinfo;
	unsigned int ret = nfqueue_tg_v1(skb, par);

	if (info->bypass)
		ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
	return ret;
}

static int nfqueue_tg_check(const struct xt_tgchk_param *par)
{
	const struct xt_NFQ_info_v3 *info = par->targinfo;
	u32 maxid;

	init_hashrandom(&jhash_initval);

	if (info->queues_total == 0) {
		pr_info_ratelimited("number of total queues is 0\n");
		return -EINVAL;
	}
	maxid = info->queues_total - 1 + info->queuenum;
	if (maxid > 0xffff) {
		pr_info_ratelimited("number of queues (%u) out of range (got %u)\n",
				    info->queues_total, maxid);
		return -ERANGE;
	}
	if (par->target->revision == 2 && info->flags > 1)
		return -EINVAL;
	if (par->target->revision == 3 && info->flags & ~NFQ_FLAG_MASK)
		return -EINVAL;

	return 0;
}

static unsigned int
nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_NFQ_info_v3 *info = par->targinfo;
	u32 queue = info->queuenum;
	int ret;

	if (info->queues_total > 1) {
		if (info->flags & NFQ_FLAG_CPU_FANOUT) {
			int cpu = smp_processor_id();

			queue = info->queuenum + cpu % info->queues_total;
		} else {
			queue = nfqueue_hash(skb, queue, info->queues_total,
					     xt_family(par), jhash_initval);
		}
	}

	ret = NF_QUEUE_NR(queue);
	if (info->flags & NFQ_FLAG_BYPASS)
		ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;

	return ret;
}

static struct xt_target nfqueue_tg_reg[] __read_mostly = {
	{
		.name		= "NFQUEUE",
		.family		= NFPROTO_UNSPEC,
		.target		= nfqueue_tg,
		.targetsize	= sizeof(struct xt_NFQ_info),
		.me		= THIS_MODULE,
	},
	{
		.name		= "NFQUEUE",
		.revision	= 1,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= nfqueue_tg_check,
		.target		= nfqueue_tg_v1,
		.targetsize	= sizeof(struct xt_NFQ_info_v1),
		.me		= THIS_MODULE,
	},
	{
		.name		= "NFQUEUE",
		.revision	= 2,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= nfqueue_tg_check,
		.target		= nfqueue_tg_v2,
		.targetsize	= sizeof(struct xt_NFQ_info_v2),
		.me		= THIS_MODULE,
	},
	{
		.name		= "NFQUEUE",
		.revision	= 3,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= nfqueue_tg_check,
		.target		= nfqueue_tg_v3,
		.targetsize	= sizeof(struct xt_NFQ_info_v3),
		.me		= THIS_MODULE,
	},
};

static int __init nfqueue_tg_init(void)
{
	return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
}

static void __exit nfqueue_tg_exit(void)
{
	xt_unregister_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
}

module_init(nfqueue_tg_init);
module_exit(nfqueue_tg_exit);
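/*
 * A standalone sketch (illustrative only) of the two fanout policies
 * implemented by nfqueue_tg_v3() above; "pick_queue" and its parameters
 * are hypothetical names. The kernel derives the packet hash with the
 * jhash-based nfqueue_hash() helper rather than the plain modulo shown
 * for the non-CPU case here.
 */
static u32 pick_queue(u32 queuenum, u32 queues_total, bool cpu_fanout,
		      u32 cpu, u32 pkthash)
{
	if (queues_total <= 1)
		return queuenum;	/* single queue, nothing to spread */
	if (cpu_fanout)			/* NFQ_FLAG_CPU_FANOUT */
		return queuenum + cpu % queues_total;
	return queuenum + pkthash % queues_total;
}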
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * CRC32C chksum
 *
 *@Article{castagnoli-crc,
 * author       = { Guy Castagnoli and Stefan Braeuer and Martin Herrman},
 * title        = {{Optimization of Cyclic Redundancy-Check Codes with 24
 *                 and 32 Parity Bits}},
 * journal      = IEEE Transactions on Communication,
 * year         = {1993},
 * volume       = {41},
 * number       = {6},
 * pages        = {},
 * month        = {June},
 *}
 * Used by the iSCSI driver, possibly others, and derived from
 * the iscsi-crc.c module of the linux-iscsi driver at
 * http://linux-iscsi.sourceforge.net.
 *
 * Following the example of lib/crc32, this function is intended to be
 * flexible and useful for all users. Modules that currently have their
 * own crc32c, but hopefully may be able to use this one are:
 *  net/sctp (please add all your doco to here if you change to
 *            use this one!)
 *  <endoflist>
 *
 * Copyright (c) 2004 Cisco Systems, Inc.
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/crc32.h>

#define CHKSUM_BLOCK_SIZE	1
#define CHKSUM_DIGEST_SIZE	4

struct chksum_ctx {
	u32 key;
};

struct chksum_desc_ctx {
	u32 crc;
};

/*
 * Steps through buffer one byte at a time, calculates reflected
 * crc using table.
 */

static int chksum_init(struct shash_desc *desc)
{
	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	ctx->crc = mctx->key;

	return 0;
}

/*
 * Setting the seed allows arbitrary accumulators and flexible XOR policy.
 * If your algorithm starts with ~0, then XOR with ~0 before you set
 * the seed.
 */
static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct chksum_ctx *mctx = crypto_shash_ctx(tfm);

	if (keylen != sizeof(mctx->key))
		return -EINVAL;
	mctx->key = get_unaligned_le32(key);
	return 0;
}

static int chksum_update(struct shash_desc *desc, const u8 *data,
			 unsigned int length)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	ctx->crc = crc32c(ctx->crc, data, length);
	return 0;
}

static int chksum_final(struct shash_desc *desc, u8 *out)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	put_unaligned_le32(~ctx->crc, out);
	return 0;
}

static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
	put_unaligned_le32(~crc32c(*crcp, data, len), out);
	return 0;
}

static int chksum_finup(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	return __chksum_finup(&ctx->crc, data, len, out);
}

static int chksum_digest(struct shash_desc *desc, const u8 *data,
			 unsigned int length, u8 *out)
{
	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);

	return __chksum_finup(&mctx->key, data, length, out);
}

static int crc32c_cra_init(struct crypto_tfm *tfm)
{
	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);

	mctx->key = ~0;
	return 0;
}

static struct shash_alg alg = {
	.digestsize		=	CHKSUM_DIGEST_SIZE,
	.setkey			=	chksum_setkey,
	.init			=	chksum_init,
	.update			=	chksum_update,
	.final			=	chksum_final,
	.finup			=	chksum_finup,
	.digest			=	chksum_digest,
	.descsize		=	sizeof(struct chksum_desc_ctx),
	.base.cra_name		=	"crc32c",
	.base.cra_driver_name	=	"crc32c-lib",
	.base.cra_priority	=	100,
	.base.cra_flags		=	CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_blocksize	=	CHKSUM_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct chksum_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_init		=	crc32c_cra_init,
};

static int __init crc32c_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit crc32c_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(crc32c_mod_init);
module_exit(crc32c_mod_fini);

MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
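/*
 * A minimal sketch (not part of this file) of computing a CRC32C digest
 * through the shash algorithm registered above. Error handling is
 * abbreviated and "crc32c_of" is a hypothetical helper name. Because
 * the algorithm sets CRYPTO_ALG_OPTIONAL_KEY, a 4-byte seed may also be
 * supplied with crypto_shash_setkey() before digesting.
 */
#include <crypto/hash.h>
#include <linux/err.h>

static int crc32c_of(const u8 *buf, unsigned int len, u8 out[4])
{
	struct crypto_shash *tfm = crypto_alloc_shash("crc32c", 0, 0);
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* init/update/final in one call; result is little-endian */
		ret = crypto_shash_digest(desc, buf, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}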
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIMERQUEUE_H
#define _LINUX_TIMERQUEUE_H

#include <linux/rbtree.h>
#include <linux/timerqueue_types.h>

extern bool timerqueue_add(struct timerqueue_head *head,
			   struct timerqueue_node *node);
extern bool timerqueue_del(struct timerqueue_head *head,
			   struct timerqueue_node *node);
extern struct timerqueue_node *timerqueue_iterate_next(
						struct timerqueue_node *node);

/**
 * timerqueue_getnext - Returns the timer with the earliest expiration time
 *
 * @head: head of timerqueue
 *
 * Returns a pointer to the timer node that has the earliest expiration time.
 */
static inline
struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
{
	struct rb_node *leftmost = rb_first_cached(&head->rb_root);

	return rb_entry_safe(leftmost, struct timerqueue_node, node);
}

static inline void timerqueue_init(struct timerqueue_node *node)
{
	RB_CLEAR_NODE(&node->node);
}

static inline bool timerqueue_node_queued(struct timerqueue_node *node)
{
	return !RB_EMPTY_NODE(&node->node);
}

static inline void timerqueue_init_head(struct timerqueue_head *head)
{
	head->rb_root = RB_ROOT_CACHED;
}
#endif /* _LINUX_TIMERQUEUE_H */
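/*
 * A minimal usage sketch for the timerqueue API above (illustrative
 * only, not part of the header): keep expiry times ordered and peek at
 * the earliest. The "expires" field of timerqueue_node comes from
 * linux/timerqueue_types.h; "my_timer" is a hypothetical wrapper.
 */
#include <linux/ktime.h>
#include <linux/timerqueue.h>

struct my_timer {
	struct timerqueue_node node;
};

static void my_timer_example(void)
{
	struct timerqueue_head head;
	struct my_timer t;
	struct timerqueue_node *first;

	timerqueue_init_head(&head);
	timerqueue_init(&t.node);
	t.node.expires = ktime_set(1, 0);	/* expires in 1s */

	/* returns true when the new node is the new earliest deadline */
	timerqueue_add(&head, &t.node);

	first = timerqueue_getnext(&head);	/* earliest node or NULL */
	if (first)
		timerqueue_del(&head, first);
}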
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM jbd2

#if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_JBD2_H

#include <linux/jbd2.h>
#include <linux/tracepoint.h>

struct transaction_chp_stats_s;
struct transaction_run_stats_s;

TRACE_EVENT(jbd2_checkpoint,

	TP_PROTO(journal_t *journal, int result),

	TP_ARGS(journal, result),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	result			)
	),

	TP_fast_assign(
		__entry->dev		= journal->j_fs_dev->bd_dev;
		__entry->result		= result;
	),

	TP_printk("dev %d,%d result %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result)
);

DECLARE_EVENT_CLASS(jbd2_commit,

	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),

	TP_ARGS(journal, commit_transaction),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	char,	sync_commit		)
		__field(	tid_t,	transaction		)
	),

	TP_fast_assign(
		__entry->dev		= journal->j_fs_dev->bd_dev;
		__entry->sync_commit	= commit_transaction->t_synchronous_commit;
		__entry->transaction	= commit_transaction->t_tid;
	),

	TP_printk("dev %d,%d transaction %u sync %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->transaction, __entry->sync_commit)
);

DEFINE_EVENT(jbd2_commit, jbd2_start_commit,

	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),

	TP_ARGS(journal, commit_transaction)
);

DEFINE_EVENT(jbd2_commit, jbd2_commit_locking,

	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),

	TP_ARGS(journal, commit_transaction)
);

DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing,

	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),

	TP_ARGS(journal, commit_transaction)
);
DEFINE_EVENT(jbd2_commit, jbd2_commit_logging, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); DEFINE_EVENT(jbd2_commit, jbd2_drop_transaction, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); TRACE_EVENT(jbd2_end_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction), TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) __field( tid_t, transaction ) __field( tid_t, head ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->sync_commit = commit_transaction->t_synchronous_commit; __entry->transaction = commit_transaction->t_tid; __entry->head = journal->j_tail_sequence; ), TP_printk("dev %d,%d transaction %u sync %d head %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit, __entry->head) ); TRACE_EVENT(jbd2_submit_inode_data, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; ), TP_printk("dev %d,%d ino %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino) ); DECLARE_EVENT_CLASS(jbd2_handle_start_class, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, requested_blocks) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->requested_blocks = requested_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u " "requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); TRACE_EVENT(jbd2_handle_extend, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int buffer_credits, int requested_blocks), TP_ARGS(dev, tid, type, line_no, buffer_credits, requested_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, buffer_credits ) __field( int, requested_blocks) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->buffer_credits = buffer_credits; __entry->requested_blocks = requested_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u " "buffer_credits %d requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->buffer_credits, __entry->requested_blocks) ); TRACE_EVENT(jbd2_handle_stats, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int interval, int sync, int requested_blocks, int dirtied_blocks), TP_ARGS(dev, tid, type, line_no, interval, sync, requested_blocks, dirtied_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( 
unsigned int, type ) __field( unsigned int, line_no ) __field( int, interval ) __field( int, sync ) __field( int, requested_blocks) __field( int, dirtied_blocks ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->interval = interval; __entry->sync = sync; __entry->requested_blocks = requested_blocks; __entry->dirtied_blocks = dirtied_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u interval %d " "sync %d requested_blocks %d dirtied_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->interval, __entry->sync, __entry->requested_blocks, __entry->dirtied_blocks) ); TRACE_EVENT(jbd2_run_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_run_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned long, wait ) __field( unsigned long, request_delay ) __field( unsigned long, running ) __field( unsigned long, locked ) __field( unsigned long, flushing ) __field( unsigned long, logging ) __field( __u32, handle_count ) __field( __u32, blocks ) __field( __u32, blocks_logged ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->wait = stats->rs_wait; __entry->request_delay = stats->rs_request_delay; __entry->running = stats->rs_running; __entry->locked = stats->rs_locked; __entry->flushing = stats->rs_flushing; __entry->logging = stats->rs_logging; __entry->handle_count = stats->rs_handle_count; __entry->blocks = stats->rs_blocks; __entry->blocks_logged = stats->rs_blocks_logged; ), TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u " "locked %u flushing %u logging %u handle_count %u " "blocks %u blocks_logged %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, jiffies_to_msecs(__entry->wait), jiffies_to_msecs(__entry->request_delay), jiffies_to_msecs(__entry->running), jiffies_to_msecs(__entry->locked), jiffies_to_msecs(__entry->flushing), jiffies_to_msecs(__entry->logging), __entry->handle_count, __entry->blocks, __entry->blocks_logged) ); TRACE_EVENT(jbd2_checkpoint_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_chp_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned long, chp_time ) __field( __u32, forced_to_close ) __field( __u32, written ) __field( __u32, dropped ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->chp_time = stats->cs_chp_time; __entry->forced_to_close= stats->cs_forced_to_close; __entry->written = stats->cs_written; __entry->dropped = stats->cs_dropped; ), TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u " "written %u dropped %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, jiffies_to_msecs(__entry->chp_time), __entry->forced_to_close, __entry->written, __entry->dropped) ); TRACE_EVENT(jbd2_update_log_tail, TP_PROTO(journal_t *journal, tid_t first_tid, unsigned long block_nr, unsigned long freed), TP_ARGS(journal, first_tid, block_nr, freed), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tail_sequence ) __field( tid_t, first_tid ) __field(unsigned long, block_nr ) __field(unsigned long, freed ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->tail_sequence = journal->j_tail_sequence; __entry->first_tid = first_tid; __entry->block_nr = block_nr; __entry->freed = freed; ), TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", MAJOR(__entry->dev), MINOR(__entry->dev), 
__entry->tail_sequence, __entry->first_tid, __entry->block_nr, __entry->freed) ); TRACE_EVENT(jbd2_write_superblock, TP_PROTO(journal_t *journal, blk_opf_t write_flags), TP_ARGS(journal, write_flags), TP_STRUCT__entry( __field( dev_t, dev ) __field( blk_opf_t, write_flags ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->write_flags = write_flags; ), TP_printk("dev %d,%d write_flags %x", MAJOR(__entry->dev), MINOR(__entry->dev), (__force u32)__entry->write_flags) ); TRACE_EVENT(jbd2_lock_buffer_stall, TP_PROTO(dev_t dev, unsigned long stall_ms), TP_ARGS(dev, stall_ms), TP_STRUCT__entry( __field( dev_t, dev ) __field(unsigned long, stall_ms ) ), TP_fast_assign( __entry->dev = dev; __entry->stall_ms = stall_ms; ), TP_printk("dev %d,%d stall_ms %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->stall_ms) ); DECLARE_EVENT_CLASS(jbd2_journal_shrink, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, nr_to_scan) __field(unsigned long, count) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->nr_to_scan = nr_to_scan; __entry->count = count; ), TP_printk("dev %d,%d nr_to_scan %lu count %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_to_scan, __entry->count) ); DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_count, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count) ); DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_scan_enter, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count) ); TRACE_EVENT(jbd2_shrink_scan_exit, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long nr_shrunk, unsigned long count), TP_ARGS(journal, nr_to_scan, nr_shrunk, count), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, nr_to_scan) __field(unsigned long, nr_shrunk) __field(unsigned long, count) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->nr_to_scan = nr_to_scan; __entry->nr_shrunk = nr_shrunk; __entry->count = count; ), TP_printk("dev %d,%d nr_to_scan %lu nr_shrunk %lu count %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_to_scan, __entry->nr_shrunk, __entry->count) ); TRACE_EVENT(jbd2_shrink_checkpoint_list, TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid, unsigned long nr_freed, tid_t next_tid), TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid), TP_STRUCT__entry( __field(dev_t, dev) __field(tid_t, first_tid) __field(tid_t, tid) __field(tid_t, last_tid) __field(unsigned long, nr_freed) __field(tid_t, next_tid) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->first_tid = first_tid; __entry->tid = tid; __entry->last_tid = last_tid; __entry->nr_freed = nr_freed; __entry->next_tid = next_tid; ), TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu " "next transaction %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->first_tid, __entry->tid, __entry->last_tid, __entry->nr_freed, __entry->next_tid) ); #endif /* _TRACE_JBD2_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
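/*
 * A minimal sketch of the producer side of this trace header: exactly
 * one .c file defines CREATE_TRACE_POINTS before including it so the
 * tracepoints are instantiated, after which each event is fired as an
 * ordinary function call. "example_checkpoint_done" is a hypothetical
 * name; in the kernel, fs/jbd2 makes the trace_jbd2_*() calls.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h>

static void example_checkpoint_done(journal_t *journal, int result)
{
	/* compiles to a no-op unless the jbd2_checkpoint event is enabled */
	trace_jbd2_checkpoint(journal, result);
}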
/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
*/ #include <linux/io.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/refcount.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-vmalloc.h> #include <media/videobuf2-memops.h> struct vb2_vmalloc_buf { void *vaddr; struct frame_vector *vec; enum dma_data_direction dma_dir; unsigned long size; refcount_t refcount; struct vb2_vmarea_handler handler; struct dma_buf *dbuf; }; static void vb2_vmalloc_put(void *buf_priv); static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev, unsigned long size) { struct vb2_vmalloc_buf *buf; buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags); if (!buf) return ERR_PTR(-ENOMEM); buf->size = size; buf->vaddr = vmalloc_user(buf->size); if (!buf->vaddr) { pr_debug("vmalloc of size %ld failed\n", buf->size); kfree(buf); return ERR_PTR(-ENOMEM); } buf->dma_dir = vb->vb2_queue->dma_dir; buf->handler.refcount = &buf->refcount; buf->handler.put = vb2_vmalloc_put; buf->handler.arg = buf; refcount_set(&buf->refcount, 1); return buf; } static void vb2_vmalloc_put(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; if (refcount_dec_and_test(&buf->refcount)) { vfree(buf->vaddr); kfree(buf); } } static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev, unsigned long vaddr, unsigned long size) { struct vb2_vmalloc_buf *buf; struct frame_vector *vec; int n_pages, offset, i; int ret = -ENOMEM; buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); buf->dma_dir = vb->vb2_queue->dma_dir; offset = vaddr & ~PAGE_MASK; buf->size = size; vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE || buf->dma_dir == DMA_BIDIRECTIONAL); if (IS_ERR(vec)) { ret = PTR_ERR(vec); goto fail_pfnvec_create; } buf->vec = vec; n_pages = frame_vector_count(vec); if (frame_vector_to_pages(vec) < 0) { unsigned long *nums = frame_vector_pfns(vec); /* * We cannot get page pointers for these pfns. Check memory is * physically contiguous and use direct mapping. 
*/ for (i = 1; i < n_pages; i++) if (nums[i-1] + 1 != nums[i]) goto fail_map; buf->vaddr = (__force void *) ioremap(__pfn_to_phys(nums[0]), size + offset); } else { buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1); } if (!buf->vaddr) goto fail_map; buf->vaddr += offset; return buf; fail_map: vb2_destroy_framevec(vec); fail_pfnvec_create: kfree(buf); return ERR_PTR(ret); } static void vb2_vmalloc_put_userptr(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; unsigned int i; struct page **pages; unsigned int n_pages; if (!buf->vec->is_pfns) { n_pages = frame_vector_count(buf->vec); if (vaddr) vm_unmap_ram((void *)vaddr, n_pages); if (buf->dma_dir == DMA_FROM_DEVICE || buf->dma_dir == DMA_BIDIRECTIONAL) { pages = frame_vector_pages(buf->vec); if (!WARN_ON_ONCE(IS_ERR(pages))) for (i = 0; i < n_pages; i++) set_page_dirty_lock(pages[i]); } } else { iounmap((__force void __iomem *)buf->vaddr); } vb2_destroy_framevec(buf->vec); kfree(buf); } static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; if (!buf->vaddr) { pr_err("Address of an unallocated plane requested or cannot map user pointer\n"); return NULL; } return buf->vaddr; } static unsigned int vb2_vmalloc_num_users(void *buf_priv) { struct vb2_vmalloc_buf *buf = buf_priv; return refcount_read(&buf->refcount); } static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma) { struct vb2_vmalloc_buf *buf = buf_priv; int ret; if (!buf) { pr_err("No memory to map\n"); return -EINVAL; } ret = remap_vmalloc_range(vma, buf->vaddr, 0); if (ret) { pr_err("Remapping vmalloc memory, error: %d\n", ret); return ret; } /* * Make sure that vm_areas for 2 buffers won't be merged together */ vm_flags_set(vma, VM_DONTEXPAND); /* * Use common vm_area operations to track buffer refcount. 
*/ vma->vm_private_data = &buf->handler; vma->vm_ops = &vb2_common_vm_ops; vma->vm_ops->open(vma); return 0; } #ifdef CONFIG_HAS_DMA /*********************************************/ /* DMABUF ops for exporters */ /*********************************************/ struct vb2_vmalloc_attachment { struct sg_table sgt; enum dma_data_direction dma_dir; }; static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct dma_buf_attachment *dbuf_attach) { struct vb2_vmalloc_attachment *attach; struct vb2_vmalloc_buf *buf = dbuf->priv; int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE; struct sg_table *sgt; struct scatterlist *sg; void *vaddr = buf->vaddr; int ret; int i; attach = kzalloc(sizeof(*attach), GFP_KERNEL); if (!attach) return -ENOMEM; sgt = &attach->sgt; ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL); if (ret) { kfree(attach); return ret; } for_each_sgtable_sg(sgt, sg, i) { struct page *page = vmalloc_to_page(vaddr); if (!page) { sg_free_table(sgt); kfree(attach); return -ENOMEM; } sg_set_page(sg, page, PAGE_SIZE, 0); vaddr += PAGE_SIZE; } attach->dma_dir = DMA_NONE; dbuf_attach->priv = attach; return 0; } static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf, struct dma_buf_attachment *db_attach) { struct vb2_vmalloc_attachment *attach = db_attach->priv; struct sg_table *sgt; if (!attach) return; sgt = &attach->sgt; /* release the scatterlist cache */ if (attach->dma_dir != DMA_NONE) dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0); sg_free_table(sgt); kfree(attach); db_attach->priv = NULL; } static struct sg_table *vb2_vmalloc_dmabuf_ops_map( struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) { struct vb2_vmalloc_attachment *attach = db_attach->priv; struct sg_table *sgt; sgt = &attach->sgt; /* return previously mapped sg table */ if (attach->dma_dir == dma_dir) return sgt; /* release any previous cache */ if (attach->dma_dir != DMA_NONE) { dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0); attach->dma_dir = DMA_NONE; } /* mapping to the client with new direction */ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) { pr_err("failed to map scatterlist\n"); return ERR_PTR(-EIO); } attach->dma_dir = dma_dir; return sgt; } static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, struct sg_table *sgt, enum dma_data_direction dma_dir) { /* nothing to be done here */ } static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf) { /* drop reference obtained in vb2_vmalloc_get_dmabuf */ vb2_vmalloc_put(dbuf->priv); } static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map) { struct vb2_vmalloc_buf *buf = dbuf->priv; iosys_map_set_vaddr(map, buf->vaddr); return 0; } static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) { return vb2_vmalloc_mmap(dbuf->priv, vma); } static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { .attach = vb2_vmalloc_dmabuf_ops_attach, .detach = vb2_vmalloc_dmabuf_ops_detach, .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap, .vmap = vb2_vmalloc_dmabuf_ops_vmap, .mmap = vb2_vmalloc_dmabuf_ops_mmap, .release = vb2_vmalloc_dmabuf_ops_release, }; static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb, void *buf_priv, unsigned long flags) { struct vb2_vmalloc_buf *buf = buf_priv; struct dma_buf *dbuf; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); exp_info.ops = &vb2_vmalloc_dmabuf_ops; exp_info.size = buf->size; exp_info.flags = flags; exp_info.priv = buf; if (WARN_ON(!buf->vaddr)) 
return NULL; dbuf = dma_buf_export(&exp_info); if (IS_ERR(dbuf)) return NULL; /* dmabuf keeps reference to vb2 buffer */ refcount_inc(&buf->refcount); return dbuf; } #endif /* CONFIG_HAS_DMA */ /*********************************************/ /* callbacks for DMABUF buffers */ /*********************************************/ static int vb2_vmalloc_map_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; struct iosys_map map; int ret; ret = dma_buf_vmap_unlocked(buf->dbuf, &map); if (ret) return -EFAULT; buf->vaddr = map.vaddr; return 0; } static void vb2_vmalloc_unmap_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); dma_buf_vunmap_unlocked(buf->dbuf, &map); buf->vaddr = NULL; } static void vb2_vmalloc_detach_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); if (buf->vaddr) dma_buf_vunmap_unlocked(buf->dbuf, &map); kfree(buf); } static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev, struct dma_buf *dbuf, unsigned long size) { struct vb2_vmalloc_buf *buf; if (dbuf->size < size) return ERR_PTR(-EFAULT); buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); buf->dbuf = dbuf; buf->dma_dir = vb->vb2_queue->dma_dir; buf->size = size; return buf; } const struct vb2_mem_ops vb2_vmalloc_memops = { .alloc = vb2_vmalloc_alloc, .put = vb2_vmalloc_put, .get_userptr = vb2_vmalloc_get_userptr, .put_userptr = vb2_vmalloc_put_userptr, #ifdef CONFIG_HAS_DMA .get_dmabuf = vb2_vmalloc_get_dmabuf, #endif .map_dmabuf = vb2_vmalloc_map_dmabuf, .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf, .attach_dmabuf = vb2_vmalloc_attach_dmabuf, .detach_dmabuf = vb2_vmalloc_detach_dmabuf, .vaddr = vb2_vmalloc_vaddr, .mmap = vb2_vmalloc_mmap, .num_users = vb2_vmalloc_num_users, }; EXPORT_SYMBOL_GPL(vb2_vmalloc_memops); MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2"); MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("DMA_BUF");
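/*
 * A minimal sketch (not part of this file) of how a capture driver
 * selects these routines: point the queue's mem_ops at
 * vb2_vmalloc_memops before vb2_queue_init(). The setup is abbreviated
 * (a real driver also fills q->ops, q->drv_priv, locking, etc.);
 * "my_init_queue" is a hypothetical name.
 */
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

static int my_init_queue(struct vb2_queue *q)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	return vb2_queue_init(q);
}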
/*
 * Routines to compress and uncompress tcp packets (for transmission
 * over low speed serial lines).
 *
 * Copyright (c) 1989 Regents of the University of California.
 * All rights reserved.
* * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by the University of California, Berkeley. The name of the * University may not be used to endorse or promote products derived * from this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: * - Initial distribution. * * * modified for KA9Q Internet Software Package by * Katie Stevens (dkstevens@ucdavis.edu) * University of California, Davis * Computing Services * - 01-31-90 initial adaptation (from 1.19) * PPP.05 02-15-90 [ks] * PPP.08 05-02-90 [ks] use PPP protocol field to signal compression * PPP.15 09-90 [ks] improve mbuf handling * PPP.16 11-02 [karn] substantially rewritten to use NOS facilities * * - Feb 1991 Bill_Simpson@um.cc.umich.edu * variable number of conversation slots * allow zero or one slots * separate routines * status display * - Jul 1994 Dmitry Gorodchanin * Fixes for memory leaks. * - Oct 1994 Dmitry Gorodchanin * Modularization. * - Jan 1995 Bjorn Ekwall * Use ip_fast_csum from ip.h * - July 1995 Christos A. Polyzols * Spotted bug in tcp option checking * * * This module is a difficult issue. It's clearly inet code but it's also clearly * driver code belonging close to PPP and SLIP */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> #include <net/slhc_vj.h> #ifdef CONFIG_INET /* Entire module is for IP only */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/termios.h> #include <linux/in.h> #include <linux/fcntl.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <net/ip.h> #include <net/protocol.h> #include <net/icmp.h> #include <net/tcp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <net/checksum.h> #include <linux/unaligned.h> static unsigned char *encode(unsigned char *cp, unsigned short n); static long decode(unsigned char **cpp); static unsigned char * put16(unsigned char *cp, unsigned short x); static unsigned short pull16(unsigned char **cpp); /* Allocate compression data structure * slots must be in range 0 to 255 (zero meaning no compression) * Returns pointer to structure or ERR_PTR() on error. */ struct slcompress * slhc_init(int rslots, int tslots) { short i; struct cstate *ts; struct slcompress *comp; if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255) return ERR_PTR(-EINVAL); comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL); if (! comp) goto out_fail; if (rslots > 0) { size_t rsize = rslots * sizeof(struct cstate); comp->rstate = kzalloc(rsize, GFP_KERNEL); if (! comp->rstate) goto out_free; comp->rslot_limit = rslots - 1; } if (tslots > 0) { size_t tsize = tslots * sizeof(struct cstate); comp->tstate = kzalloc(tsize, GFP_KERNEL); if (! 
comp->tstate) goto out_free2; comp->tslot_limit = tslots - 1; } comp->xmit_oldest = 0; comp->xmit_current = 255; comp->recv_current = 255; /* * don't accept any packets with implicit index until we get * one with an explicit index. Otherwise the uncompress code * will try to use connection 255, which is almost certainly * out of range */ comp->flags |= SLF_TOSS; if ( tslots > 0 ) { ts = comp->tstate; for(i = comp->tslot_limit; i > 0; --i){ ts[i].cs_this = i; ts[i].next = &(ts[i - 1]); } ts[0].next = &(ts[comp->tslot_limit]); ts[0].cs_this = 0; } return comp; out_free2: kfree(comp->rstate); out_free: kfree(comp); out_fail: return ERR_PTR(-ENOMEM); } /* Free a compression data structure */ void slhc_free(struct slcompress *comp) { if ( IS_ERR_OR_NULL(comp) ) return; if ( comp->tstate != NULLSLSTATE ) kfree( comp->tstate ); if ( comp->rstate != NULLSLSTATE ) kfree( comp->rstate ); kfree( comp ); } /* Put a short in host order into a char array in network order */ static inline unsigned char * put16(unsigned char *cp, unsigned short x) { *cp++ = x >> 8; *cp++ = x; return cp; } /* Encode a number */ static unsigned char * encode(unsigned char *cp, unsigned short n) { if(n >= 256 || n == 0){ *cp++ = 0; cp = put16(cp,n); } else { *cp++ = n; } return cp; } /* Pull a 16-bit integer in host order from buffer in network byte order */ static unsigned short pull16(unsigned char **cpp) { short rval; rval = *(*cpp)++; rval <<= 8; rval |= *(*cpp)++; return rval; } /* Decode a number */ static long decode(unsigned char **cpp) { int x; x = *(*cpp)++; if(x == 0){ return pull16(cpp) & 0xffff; /* pull16 returns -1 on error */ } else { return x & 0xff; /* -1 if PULLCHAR returned error */ } } /* * icp and isize are the original packet. * ocp is a place to put a copy if necessary. * cpp is initially a pointer to icp. If the copy is used, * change it to ocp. */ int slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, unsigned char *ocp, unsigned char **cpp, int compress_cid) { struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]); struct cstate *lcs = ocs; struct cstate *cs = lcs->next; unsigned long deltaS, deltaA; short changes = 0; int nlen, hlen; unsigned char new_seq[16]; unsigned char *cp = new_seq; struct iphdr *ip; struct tcphdr *th, *oth; __sum16 csum; /* * Don't play with runt packets. */ if(isize<sizeof(struct iphdr)) return isize; ip = (struct iphdr *) icp; if (ip->version != 4 || ip->ihl < 5) return isize; /* Bail if this packet isn't TCP, or is an IP fragment */ if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) { /* Send as regular IP */ if(ip->protocol != IPPROTO_TCP) comp->sls_o_nontcp++; else comp->sls_o_tcp++; return isize; } nlen = ip->ihl * 4; if (isize < nlen + sizeof(*th)) return isize; th = (struct tcphdr *)(icp + nlen); if (th->doff < sizeof(struct tcphdr) / 4) return isize; hlen = nlen + th->doff * 4; /* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or * some other control bit is set). Also uncompressible if * it's a runt. */ if(hlen > isize || th->syn || th->fin || th->rst || ! (th->ack)){ /* TCP connection stuff; send as regular IP */ comp->sls_o_tcp++; return isize; } /* * Packet is compressible -- we're going to send either a * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way, * we need to locate (or create) the connection state. * * States are kept in a circularly linked list with * xmit_oldest pointing to the end of the list. 
The * list is kept in lru order by moving a state to the * head of the list whenever it is referenced. Since * the list is short and, empirically, the connection * we want is almost always near the front, we locate * states via linear search. If we don't find a state * for the datagram, the oldest state is (re-)used. */ for ( ; ; ) { if( ip->saddr == cs->cs_ip.saddr && ip->daddr == cs->cs_ip.daddr && th->source == cs->cs_tcp.source && th->dest == cs->cs_tcp.dest) goto found; /* if current equal oldest, at end of list */ if ( cs == ocs ) break; lcs = cs; cs = cs->next; comp->sls_o_searches++; } /* * Didn't find it -- re-use oldest cstate. Send an * uncompressed packet that tells the other side what * connection number we're using for this conversation. * * Note that since the state list is circular, the oldest * state points to the newest and we only need to set * xmit_oldest to update the lru linkage. */ comp->sls_o_misses++; comp->xmit_oldest = lcs->cs_this; goto uncompressed; found: /* * Found it -- move to the front on the connection list. */ if(lcs == ocs) { /* found at most recently used */ } else if (cs == ocs) { /* found at least recently used */ comp->xmit_oldest = lcs->cs_this; } else { /* more than 2 elements */ lcs->next = cs->next; cs->next = ocs->next; ocs->next = cs; } /* * Make sure that only what we expect to change changed. * Check the following: * IP protocol version, header length & type of service. * The "Don't fragment" bit. * The time-to-live field. * The TCP header length. * IP options, if any. * TCP options, if any. * If any of these things are different between the previous & * current datagram, we send the current datagram `uncompressed'. */ oth = &cs->cs_tcp; if(ip->version != cs->cs_ip.version || ip->ihl != cs->cs_ip.ihl || ip->tos != cs->cs_ip.tos || (ip->frag_off & htons(0x4000)) != (cs->cs_ip.frag_off & htons(0x4000)) || ip->ttl != cs->cs_ip.ttl || th->doff != cs->cs_tcp.doff || (ip->ihl > 5 && memcmp(ip+1,cs->cs_ipopt,((ip->ihl)-5)*4) != 0) || (th->doff > 5 && memcmp(th+1,cs->cs_tcpopt,((th->doff)-5)*4) != 0)){ goto uncompressed; } /* * Figure out which of the changing fields changed. The * receiver expects changes in the order: urgent, window, * ack, seq (the order minimizes the number of temporaries * needed in this section of code). */ if(th->urg){ deltaS = ntohs(th->urg_ptr); cp = encode(cp,deltaS); changes |= NEW_U; } else if(th->urg_ptr != oth->urg_ptr){ /* argh! URG not set but urp changed -- a sensible * implementation should never do this but RFC793 * doesn't prohibit the change so we have to deal * with it. */ goto uncompressed; } if((deltaS = ntohs(th->window) - ntohs(oth->window)) != 0){ cp = encode(cp,deltaS); changes |= NEW_W; } if((deltaA = ntohl(th->ack_seq) - ntohl(oth->ack_seq)) != 0L){ if(deltaA > 0x0000ffff) goto uncompressed; cp = encode(cp,deltaA); changes |= NEW_A; } if((deltaS = ntohl(th->seq) - ntohl(oth->seq)) != 0L){ if(deltaS > 0x0000ffff) goto uncompressed; cp = encode(cp,deltaS); changes |= NEW_S; } switch(changes){ case 0: /* Nothing changed. If this packet contains data and the * last one didn't, this is probably a data packet following * an ack (normal on an interactive connection) and we send * it compressed. Otherwise it's probably a retransmit, * retransmitted ack or window probe. Send it uncompressed * in case the other side missed the compressed version. 
*/ if(ip->tot_len != cs->cs_ip.tot_len && ntohs(cs->cs_ip.tot_len) == hlen) break; goto uncompressed; case SPECIAL_I: case SPECIAL_D: /* actual changes match one of our special case encodings -- * send packet uncompressed. */ goto uncompressed; case NEW_S|NEW_A: if(deltaS == deltaA && deltaS == ntohs(cs->cs_ip.tot_len) - hlen){ /* special case for echoed terminal traffic */ changes = SPECIAL_I; cp = new_seq; } break; case NEW_S: if(deltaS == ntohs(cs->cs_ip.tot_len) - hlen){ /* special case for data xfer */ changes = SPECIAL_D; cp = new_seq; } break; } deltaS = ntohs(ip->id) - ntohs(cs->cs_ip.id); if(deltaS != 1){ cp = encode(cp,deltaS); changes |= NEW_I; } if(th->psh) changes |= TCP_PUSH_BIT; /* Grab the cksum before we overwrite it below. Then update our * state with this packet's header. */ csum = th->check; memcpy(&cs->cs_ip,ip,20); memcpy(&cs->cs_tcp,th,20); /* We want to use the original packet as our compressed packet. * (cp - new_seq) is the number of bytes we need for compressed * sequence numbers. In addition we need one byte for the change * mask, one for the connection id and two for the tcp checksum. * So, (cp - new_seq) + 4 bytes of header are needed. */ deltaS = cp - new_seq; if(compress_cid == 0 || comp->xmit_current != cs->cs_this){ cp = ocp; *cpp = ocp; *cp++ = changes | NEW_C; *cp++ = cs->cs_this; comp->xmit_current = cs->cs_this; } else { cp = ocp; *cpp = ocp; *cp++ = changes; } *(__sum16 *)cp = csum; cp += 2; /* deltaS is now the size of the change section of the compressed header */ memcpy(cp,new_seq,deltaS); /* Write list of deltas */ memcpy(cp+deltaS,icp+hlen,isize-hlen); comp->sls_o_compressed++; ocp[0] |= SL_TYPE_COMPRESSED_TCP; return isize - hlen + deltaS + (cp - ocp); /* Update connection state cs & send uncompressed packet (i.e., * a regular ip/tcp packet but with the 'conversation id' we hope * to use on future compressed packets in the protocol field). */ uncompressed: memcpy(&cs->cs_ip,ip,20); memcpy(&cs->cs_tcp,th,20); if (ip->ihl > 5) memcpy(cs->cs_ipopt, ip+1, ((ip->ihl) - 5) * 4); if (th->doff > 5) memcpy(cs->cs_tcpopt, th+1, ((th->doff) - 5) * 4); comp->xmit_current = cs->cs_this; comp->sls_o_uncompressed++; memcpy(ocp, icp, isize); *cpp = ocp; ocp[9] = cs->cs_this; ocp[0] |= SL_TYPE_UNCOMPRESSED_TCP; return isize; } int slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) { int changes; long x; struct tcphdr *thp; struct iphdr *ip; struct cstate *cs; int len, hdrlen; unsigned char *cp = icp; /* We've got a compressed packet; read the change byte */ comp->sls_i_compressed++; if(isize < 3){ comp->sls_i_error++; return 0; } changes = *cp++; if(changes & NEW_C){ /* Make sure the state index is in range, then grab the state. * If we have a good state index, clear the 'discard' flag. */ x = *cp++; /* Read conn index */ if(x < 0 || x > comp->rslot_limit) goto bad; /* Check if the cstate is initialized */ if (!comp->rstate[x].initialized) goto bad; comp->flags &=~ SLF_TOSS; comp->recv_current = x; } else { /* this packet has an implicit state index. If we've * had a line error since the last time we got an * explicit state index, we have to toss the packet. */ if(comp->flags & SLF_TOSS){ comp->sls_i_tossed++; return 0; } } cs = &comp->rstate[comp->recv_current]; thp = &cs->cs_tcp; ip = &cs->cs_ip; thp->check = *(__sum16 *)cp; cp += 2; thp->psh = (changes & TCP_PUSH_BIT) ? 
1 : 0; /* * we can use the same number for the length of the saved header and * the current one, because the packet wouldn't have been sent * as compressed unless the options were the same as the previous one */ hdrlen = ip->ihl * 4 + thp->doff * 4; switch(changes & SPECIALS_MASK){ case SPECIAL_I: /* Echoed terminal traffic */ { short i; i = ntohs(ip->tot_len) - hdrlen; thp->ack_seq = htonl( ntohl(thp->ack_seq) + i); thp->seq = htonl( ntohl(thp->seq) + i); } break; case SPECIAL_D: /* Unidirectional data */ thp->seq = htonl( ntohl(thp->seq) + ntohs(ip->tot_len) - hdrlen); break; default: if(changes & NEW_U){ thp->urg = 1; if((x = decode(&cp)) == -1) { goto bad; } thp->urg_ptr = htons(x); } else thp->urg = 0; if(changes & NEW_W){ if((x = decode(&cp)) == -1) { goto bad; } thp->window = htons( ntohs(thp->window) + x); } if(changes & NEW_A){ if((x = decode(&cp)) == -1) { goto bad; } thp->ack_seq = htonl( ntohl(thp->ack_seq) + x); } if(changes & NEW_S){ if((x = decode(&cp)) == -1) { goto bad; } thp->seq = htonl( ntohl(thp->seq) + x); } break; } if(changes & NEW_I){ if((x = decode(&cp)) == -1) { goto bad; } ip->id = htons (ntohs (ip->id) + x); } else ip->id = htons (ntohs (ip->id) + 1); /* * At this point, cp points to the first byte of data in the * packet. Put the reconstructed TCP and IP headers back on the * packet. Recalculate IP checksum (but not TCP checksum). */ len = isize - (cp - icp); if (len < 0) goto bad; len += hdrlen; ip->tot_len = htons(len); ip->check = 0; memmove(icp + hdrlen, cp, len - hdrlen); cp = icp; memcpy(cp, ip, 20); cp += 20; if (ip->ihl > 5) { memcpy(cp, cs->cs_ipopt, (ip->ihl - 5) * 4); cp += (ip->ihl - 5) * 4; } put_unaligned(ip_fast_csum(icp, ip->ihl), &((struct iphdr *)icp)->check); memcpy(cp, thp, 20); cp += 20; if (thp->doff > 5) { memcpy(cp, cs->cs_tcpopt, ((thp->doff) - 5) * 4); cp += ((thp->doff) - 5) * 4; } return len; bad: comp->sls_i_error++; return slhc_toss( comp ); } int slhc_remember(struct slcompress *comp, unsigned char *icp, int isize) { const struct tcphdr *th; unsigned char index; struct iphdr *iph; struct cstate *cs; unsigned int ihl; /* The packet is shorter than a legal IP header. * Also make sure isize is positive. */ if (isize < (int)sizeof(struct iphdr)) { runt: comp->sls_i_runt++; return slhc_toss(comp); } iph = (struct iphdr *)icp; /* Peek at the IP header's IHL field to find its length */ ihl = iph->ihl; /* The IP header length field is too small, * or packet is shorter than the IP header followed * by minimal tcp header. 
*/ if (ihl < 5 || isize < ihl * 4 + sizeof(struct tcphdr)) goto runt; index = iph->protocol; iph->protocol = IPPROTO_TCP; if (ip_fast_csum(icp, ihl)) { /* Bad IP header checksum; discard */ comp->sls_i_badcheck++; return slhc_toss(comp); } if (index > comp->rslot_limit) { comp->sls_i_error++; return slhc_toss(comp); } th = (struct tcphdr *)(icp + ihl * 4); if (th->doff < sizeof(struct tcphdr) / 4) goto runt; if (isize < ihl * 4 + th->doff * 4) goto runt; /* Update local state */ cs = &comp->rstate[comp->recv_current = index]; comp->flags &=~ SLF_TOSS; memcpy(&cs->cs_ip, iph, sizeof(*iph)); memcpy(&cs->cs_tcp, th, sizeof(*th)); if (ihl > 5) memcpy(cs->cs_ipopt, &iph[1], (ihl - 5) * 4); if (th->doff > 5) memcpy(cs->cs_tcpopt, &th[1], (th->doff - 5) * 4); cs->cs_hsize = ihl*2 + th->doff*2; cs->initialized = true; /* Put headers back on packet * Neither header checksum is recalculated */ comp->sls_i_uncompressed++; return isize; } int slhc_toss(struct slcompress *comp) { if ( comp == NULLSLCOMPR ) return 0; comp->flags |= SLF_TOSS; return 0; } #else /* CONFIG_INET */ int slhc_toss(struct slcompress *comp) { printk(KERN_DEBUG "Called IP function on non IP-system: slhc_toss"); return -EINVAL; } int slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) { printk(KERN_DEBUG "Called IP function on non IP-system: slhc_uncompress"); return -EINVAL; } int slhc_compress(struct slcompress *comp, unsigned char *icp, int isize, unsigned char *ocp, unsigned char **cpp, int compress_cid) { printk(KERN_DEBUG "Called IP function on non IP-system: slhc_compress"); return -EINVAL; } int slhc_remember(struct slcompress *comp, unsigned char *icp, int isize) { printk(KERN_DEBUG "Called IP function on non IP-system: slhc_remember"); return -EINVAL; } void slhc_free(struct slcompress *comp) { printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free"); } struct slcompress * slhc_init(int rslots, int tslots) { printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init"); return NULL; } #endif /* CONFIG_INET */ /* VJ header compression */ EXPORT_SYMBOL(slhc_init); EXPORT_SYMBOL(slhc_free); EXPORT_SYMBOL(slhc_remember); EXPORT_SYMBOL(slhc_compress); EXPORT_SYMBOL(slhc_uncompress); EXPORT_SYMBOL(slhc_toss); MODULE_DESCRIPTION("Compression helpers for SLIP (serial line)"); MODULE_LICENSE("Dual BSD/GPL");
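The encode()/decode() helpers above are the heart of the compressed header format: a delta of 1..255 is sent as a single byte, while zero or anything 256 and up is escaped with a zero byte followed by the 16-bit value in network byte order. Here is a standalone restatement of that scheme (userspace C; the vj_ names are hypothetical, not from the kernel), handy for exercising the format outside the kernel:

#include <stdint.h>

/* Restatement of slhc's encode()/decode() delta format:
 * 1..255 -> one byte; 0 or >= 256 -> 0x00 then 16 bits, big endian. */
static uint8_t *vj_encode(uint8_t *cp, uint16_t n)
{
	if (n >= 256 || n == 0) {
		*cp++ = 0;
		*cp++ = n >> 8;		/* high byte first: network order */
		*cp++ = n & 0xff;
	} else {
		*cp++ = (uint8_t)n;	/* common case: single byte */
	}
	return cp;
}

static long vj_decode(const uint8_t **cpp)
{
	unsigned int x = *(*cpp)++;

	if (x == 0) {			/* escape: 16-bit value follows */
		x = *(*cpp)++ << 8;
		x |= *(*cpp)++;
	}
	return x;
}

Round-tripping values such as n = 1, 255, 256 and 0 through vj_encode()/vj_decode() exercises both branches of the encoding.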
// SPDX-License-Identifier: GPL-2.0-only /* * debugfs code for HSR & PRP * Copyright (C) 2019 Texas Instruments Incorporated * * Author(s): * Murali Karicheri <m-karicheri2@ti.com> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/debugfs.h> #include "hsr_main.h" #include "hsr_framereg.h" static struct dentry *hsr_debugfs_root_dir; /* hsr_node_table_show - Formats and prints node_table entries */ static int hsr_node_table_show(struct seq_file *sfp, void *data) { struct hsr_priv *priv = (struct hsr_priv *)sfp->private; struct hsr_node *node; seq_printf(sfp, "Node Table entries for (%s) device\n", (priv->prot_version == PRP_V1 ? "PRP" : "HSR")); seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], "); seq_puts(sfp, "time_in[B], Address-B port, "); if (priv->prot_version == PRP_V1) seq_puts(sfp, "SAN-A, SAN-B, DAN-P\n"); else seq_puts(sfp, "DAN-H\n"); rcu_read_lock(); list_for_each_entry_rcu(node, &priv->node_db, mac_list) { /* skip self node */ if (hsr_addr_is_self(priv, node->macaddress_A)) continue; seq_printf(sfp, "%pM ", &node->macaddress_A[0]); seq_printf(sfp, "%pM ", &node->macaddress_B[0]); seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]); seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]); seq_printf(sfp, "%14x, ", node->addr_B_port); if (priv->prot_version == PRP_V1) seq_printf(sfp, "%5x, %5x, %5x\n", node->san_a, node->san_b, (node->san_a == 0 && node->san_b == 0)); else seq_printf(sfp, "%5x\n", 1); } rcu_read_unlock(); return 0; } DEFINE_SHOW_ATTRIBUTE(hsr_node_table); void hsr_debugfs_rename(struct net_device *dev) { struct hsr_priv *priv = netdev_priv(dev); int err; err = debugfs_change_name(priv->node_tbl_root, "%s", dev->name); if (err) netdev_warn(dev, "failed to rename\n"); } /* hsr_debugfs_init - create hsr node_table file for dumping * the node table * * Description: * When debugfs is configured this routine sets up the node_table file per * hsr device for dumping the node_table entries */ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) { struct dentry *de = NULL; de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir); if (IS_ERR(de)) { pr_err("Cannot create hsr debugfs directory\n"); return; } priv->node_tbl_root = de; de = debugfs_create_file("node_table", S_IFREG | 0444, priv->node_tbl_root, priv, &hsr_node_table_fops); if (IS_ERR(de)) { pr_err("Cannot create hsr node_table file\n"); debugfs_remove(priv->node_tbl_root); priv->node_tbl_root = NULL; return; } } /* hsr_debugfs_term - Tear down debugfs infrastructure * * Description: * When debugfs is configured this routine removes debugfs file system * elements that are specific to hsr */ void hsr_debugfs_term(struct hsr_priv *priv) { debugfs_remove_recursive(priv->node_tbl_root); priv->node_tbl_root = NULL; } void hsr_debugfs_create_root(void) { hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL); if (IS_ERR(hsr_debugfs_root_dir)) { pr_err("Cannot create hsr debugfs root directory\n"); hsr_debugfs_root_dir = NULL; } } void hsr_debugfs_remove_root(void) { /* debugfs_remove() internally checks NULL and ERROR */ debugfs_remove(hsr_debugfs_root_dir); }
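hsr_node_table_show() plus DEFINE_SHOW_ATTRIBUTE() above is the standard pattern for read-only, one-shot debugfs files: write a seq_file show routine and let the macro generate the open handler and file_operations. A minimal sketch of the same pattern follows, under hypothetical demo_* names (not part of the HSR code):

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* Hypothetical demo of the DEFINE_SHOW_ATTRIBUTE() pattern used above. */
static int demo_show(struct seq_file *sfp, void *data)
{
	seq_puts(sfp, "hello from debugfs\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);	/* generates demo_open and demo_fops */

static struct dentry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("state", 0444, demo_dir, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_dir);	/* recursive; NULL/ERR safe */
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("debugfs seq_file demo");
MODULE_LICENSE("GPL");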
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM alarmtimer #if !defined(_TRACE_ALARMTIMER_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_ALARMTIMER_H #include <linux/alarmtimer.h> #include <linux/rtc.h> #include <linux/tracepoint.h> TRACE_DEFINE_ENUM(ALARM_REALTIME); TRACE_DEFINE_ENUM(ALARM_BOOTTIME); TRACE_DEFINE_ENUM(ALARM_REALTIME_FREEZER); TRACE_DEFINE_ENUM(ALARM_BOOTTIME_FREEZER); #define show_alarm_type(type) __print_flags(type, " | ", \ { 1 << ALARM_REALTIME, "REALTIME" }, \ { 1 << ALARM_BOOTTIME, "BOOTTIME" }, \ { 1 << ALARM_REALTIME_FREEZER, "REALTIME Freezer" }, \ { 1 << ALARM_BOOTTIME_FREEZER, "BOOTTIME Freezer" }) #ifdef CONFIG_RTC_CLASS TRACE_EVENT(alarmtimer_suspend, TP_PROTO(ktime_t expires, int flag), TP_ARGS(expires, flag), TP_STRUCT__entry( __field(s64, expires) __field(unsigned char, alarm_type) ), TP_fast_assign( __entry->expires = expires; __entry->alarm_type = flag; ), TP_printk("alarmtimer type:%s expires:%llu", show_alarm_type((1 << __entry->alarm_type)), __entry->expires ) ); #endif /* CONFIG_RTC_CLASS */ DECLARE_EVENT_CLASS(alarm_class, TP_PROTO(struct alarm *alarm, ktime_t now), TP_ARGS(alarm, now), TP_STRUCT__entry( __field(void *, alarm) __field(unsigned char, alarm_type) __field(s64, expires) __field(s64, now) ), TP_fast_assign( __entry->alarm = alarm; __entry->alarm_type = alarm->type; __entry->expires = alarm->node.expires; __entry->now = now; ), TP_printk("alarmtimer:%p type:%s expires:%llu now:%llu", __entry->alarm, show_alarm_type((1 << __entry->alarm_type)), __entry->expires, __entry->now ) ); DEFINE_EVENT(alarm_class, alarmtimer_fired, TP_PROTO(struct alarm *alarm, ktime_t now), TP_ARGS(alarm, now) ); DEFINE_EVENT(alarm_class, alarmtimer_start, TP_PROTO(struct alarm *alarm, ktime_t now), TP_ARGS(alarm, now) ); DEFINE_EVENT(alarm_class, alarmtimer_cancel, TP_PROTO(struct alarm *alarm, ktime_t now), TP_ARGS(alarm, now) ); #endif /* _TRACE_ALARMTIMER_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
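Each DEFINE_EVENT() above turns the event name into a callable trace_<name>() inline. A hedged sketch of how such call sites look (paraphrased, not the verbatim call sites, which live in kernel/time/alarmtimer.c; the demo_ function is hypothetical, and CREATE_TRACE_POINTS setup is omitted):

/* Paraphrased use of the events declared above. Enable at runtime with:
 * echo 1 > /sys/kernel/tracing/events/alarmtimer/enable
 */
#include <trace/events/alarmtimer.h>

static void demo_alarm_lifecycle(struct alarm *alarm, ktime_t now)
{
	trace_alarmtimer_start(alarm, now);	/* alarm queued */
	trace_alarmtimer_fired(alarm, now);	/* hrtimer handler ran */
	trace_alarmtimer_cancel(alarm, now);	/* alarm removed early */
}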
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KDEV_T_H #define _LINUX_KDEV_T_H #include <uapi/linux/kdev_t.h> #define MINORBITS 20 #define MINORMASK ((1U << MINORBITS) - 1) #define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) #define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) #define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi)) #define print_dev_t(buffer, dev) \ sprintf((buffer), "%u:%u\n", MAJOR(dev), MINOR(dev)) #define format_dev_t(buffer, dev) \ ({ \ sprintf(buffer, "%u:%u", MAJOR(dev), MINOR(dev)); \ buffer; \ }) /* acceptable for old filesystems */ static __always_inline bool old_valid_dev(dev_t dev) { return MAJOR(dev) < 256 && MINOR(dev) < 256; } static __always_inline u16 old_encode_dev(dev_t dev) { return (MAJOR(dev) << 8) | MINOR(dev); } static __always_inline dev_t old_decode_dev(u16 val) { return MKDEV((val >> 8) & 255, val & 255); } static __always_inline u32 new_encode_dev(dev_t dev) { unsigned major = MAJOR(dev); unsigned minor = MINOR(dev); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } static __always_inline dev_t new_decode_dev(u32 dev) { unsigned major = (dev & 0xfff00) >> 8; unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); return MKDEV(major, minor); } static __always_inline u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } static __always_inline dev_t huge_decode_dev(u64 dev) { return new_decode_dev(dev); } static __always_inline int sysv_valid_dev(dev_t dev) { return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); } static __always_inline u32 sysv_encode_dev(dev_t dev) { return MINOR(dev) | (MAJOR(dev) << 18); } static __always_inline unsigned sysv_major(u32 dev) { return (dev >> 18) & 0x3fff; } static __always_inline unsigned sysv_minor(u32 dev) { return dev & 0x3ffff; } #endif
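new_encode_dev() above packs a dev_t so the low byte of the minor stays in bits 0-7 (keeping values compatible with old_encode_dev() for small majors/minors), the 12-bit major sits in bits 8-19, and the remaining high minor bits land in bits 20-31. A standalone round-trip check of that layout (userspace C; the demo_ names are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Userspace restatement of new_encode_dev()/new_decode_dev():
 * bits 0..7: minor low byte, bits 8..19: major, bits 20..31: minor high. */
static uint32_t demo_encode(uint32_t major, uint32_t minor)
{
	return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

static void demo_decode(uint32_t dev, uint32_t *major, uint32_t *minor)
{
	*major = (dev & 0xfff00) >> 8;
	*minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
}

int main(void)
{
	uint32_t ma, mi;

	/* arbitrary values within the 12-bit major / 20-bit minor ranges */
	demo_decode(demo_encode(259, 70000), &ma, &mi);
	assert(ma == 259 && mi == 70000);
	return 0;
}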
/* SPDX-License-Identifier: GPL-2.0 */ /* * Task I/O accounting operations */ #ifndef __TASK_IO_ACCOUNTING_OPS_INCLUDED #define __TASK_IO_ACCOUNTING_OPS_INCLUDED #include <linux/sched.h> #ifdef CONFIG_TASK_IO_ACCOUNTING static inline void task_io_account_read(size_t bytes) { current->ioac.read_bytes += bytes; } /* * We approximate number of blocks, because we account bytes only. * A 'block' is 512 bytes */ static inline unsigned long task_io_get_inblock(const struct task_struct *p) { return p->ioac.read_bytes >> 9; } static inline void task_io_account_write(size_t bytes) { current->ioac.write_bytes += bytes; } /* * We approximate number of blocks, because we account bytes only. * A 'block' is 512 bytes */ static inline unsigned long task_io_get_oublock(const struct task_struct *p) { return p->ioac.write_bytes >> 9; } static inline void task_io_account_cancelled_write(size_t bytes) { current->ioac.cancelled_write_bytes += bytes; } static inline void task_io_accounting_init(struct task_io_accounting *ioac) { memset(ioac, 0, sizeof(*ioac)); } static inline void task_blk_io_accounting_add(struct task_io_accounting *dst, struct task_io_accounting *src) { dst->read_bytes += src->read_bytes; dst->write_bytes += src->write_bytes; dst->cancelled_write_bytes += src->cancelled_write_bytes; } #else static inline void task_io_account_read(size_t bytes) { } static inline unsigned long task_io_get_inblock(const struct task_struct *p) { return 0; } static inline void task_io_account_write(size_t bytes) { } static inline unsigned long task_io_get_oublock(const struct task_struct *p) { return 0; } static inline void task_io_account_cancelled_write(size_t bytes) { } static inline void task_io_accounting_init(struct task_io_accounting *ioac) { } static inline void task_blk_io_accounting_add(struct task_io_accounting *dst, struct task_io_accounting *src) { } #endif /* CONFIG_TASK_IO_ACCOUNTING */ #ifdef CONFIG_TASK_XACCT static inline void task_chr_io_accounting_add(struct task_io_accounting *dst, struct task_io_accounting *src) { dst->rchar += src->rchar; dst->wchar += src->wchar; dst->syscr += src->syscr; dst->syscw += src->syscw; } #else static inline void task_chr_io_accounting_add(struct task_io_accounting *dst, struct task_io_accounting *src) { } #endif /* CONFIG_TASK_XACCT */ static inline void task_io_accounting_add(struct task_io_accounting *dst, struct task_io_accounting *src) { task_chr_io_accounting_add(dst, src); task_blk_io_accounting_add(dst, src); } #endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */
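task_io_get_inblock()/task_io_get_oublock() above derive getrusage()-style block counts from byte counters with a single shift, since a 'block' is defined as 512 bytes. The arithmetic spelled out as a tiny standalone check (userspace C, example values only):

#include <assert.h>

/* The >> 9 above is division by 512: byte counters are truncated to
 * whole 512-byte blocks, the remainder is simply dropped. */
int main(void)
{
	unsigned long read_bytes = 4096 + 300;	/* example byte counter */

	assert((read_bytes >> 9) == 8);		/* 4396 / 512 -> 8 blocks */
	return 0;
}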
/* * Copyright © 2006 Keith Packard * Copyright © 2007-2008 Dave Airlie * Copyright © 2007-2008 Intel Corporation * Jesse Barnes <jesse.barnes@intel.com> * Copyright © 2014 Intel Corporation * Daniel Vetter <daniel.vetter@ffwll.ch> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE.
*/ #ifndef __DRM_MODES_H__ #define __DRM_MODES_H__ #include <linux/hdmi.h> #include <drm/drm_mode_object.h> #include <drm/drm_connector.h> struct videomode; /* * Note on terminology: here, for brevity and convenience, we refer to connector * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, * DVI, etc. And 'screen' refers to the whole of the visible display, which * may span multiple monitors (and therefore multiple CRTC and connector * structures). */ /** * enum drm_mode_status - hardware support status of a mode * @MODE_OK: Mode OK * @MODE_HSYNC: hsync out of range * @MODE_VSYNC: vsync out of range * @MODE_H_ILLEGAL: mode has illegal horizontal timings * @MODE_V_ILLEGAL: mode has illegal vertical timings * @MODE_BAD_WIDTH: requires an unsupported linepitch * @MODE_NOMODE: no mode with a matching name * @MODE_NO_INTERLACE: interlaced mode not supported * @MODE_NO_DBLESCAN: doublescan mode not supported * @MODE_NO_VSCAN: multiscan mode not supported * @MODE_MEM: insufficient video memory * @MODE_VIRTUAL_X: mode width too large for specified virtual size * @MODE_VIRTUAL_Y: mode height too large for specified virtual size * @MODE_MEM_VIRT: insufficient video memory given virtual size * @MODE_NOCLOCK: no fixed clock available * @MODE_CLOCK_HIGH: clock required is too high * @MODE_CLOCK_LOW: clock required is too low * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange * @MODE_BAD_HVALUE: horizontal timing was out of range * @MODE_BAD_VVALUE: vertical timing was out of range * @MODE_BAD_VSCAN: VScan value out of range * @MODE_HSYNC_NARROW: horizontal sync too narrow * @MODE_HSYNC_WIDE: horizontal sync too wide * @MODE_HBLANK_NARROW: horizontal blanking too narrow * @MODE_HBLANK_WIDE: horizontal blanking too wide * @MODE_VSYNC_NARROW: vertical sync too narrow * @MODE_VSYNC_WIDE: vertical sync too wide * @MODE_VBLANK_NARROW: vertical blanking too narrow * @MODE_VBLANK_WIDE: vertical blanking too wide * @MODE_PANEL: exceeds panel dimensions * @MODE_INTERLACE_WIDTH: width too large for interlaced mode * @MODE_ONE_WIDTH: only one width is supported * @MODE_ONE_HEIGHT: only one height is supported * @MODE_ONE_SIZE: only one resolution is supported * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking * @MODE_NO_STEREO: stereo modes not supported * @MODE_NO_420: ycbcr 420 modes not supported * @MODE_STALE: mode has become stale * @MODE_BAD: unspecified reason * @MODE_ERROR: error condition * * This enum is used to filter out modes not supported by the driver/hardware * combination. 
*/ enum drm_mode_status { MODE_OK = 0, MODE_HSYNC, MODE_VSYNC, MODE_H_ILLEGAL, MODE_V_ILLEGAL, MODE_BAD_WIDTH, MODE_NOMODE, MODE_NO_INTERLACE, MODE_NO_DBLESCAN, MODE_NO_VSCAN, MODE_MEM, MODE_VIRTUAL_X, MODE_VIRTUAL_Y, MODE_MEM_VIRT, MODE_NOCLOCK, MODE_CLOCK_HIGH, MODE_CLOCK_LOW, MODE_CLOCK_RANGE, MODE_BAD_HVALUE, MODE_BAD_VVALUE, MODE_BAD_VSCAN, MODE_HSYNC_NARROW, MODE_HSYNC_WIDE, MODE_HBLANK_NARROW, MODE_HBLANK_WIDE, MODE_VSYNC_NARROW, MODE_VSYNC_WIDE, MODE_VBLANK_NARROW, MODE_VBLANK_WIDE, MODE_PANEL, MODE_INTERLACE_WIDTH, MODE_ONE_WIDTH, MODE_ONE_HEIGHT, MODE_ONE_SIZE, MODE_NO_REDUCED, MODE_NO_STEREO, MODE_NO_420, MODE_STALE = -3, MODE_BAD = -2, MODE_ERROR = -1 }; #define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ .name = nm, .status = 0, .type = (t), .clock = (c), \ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ .vscan = (vs), .flags = (f) /** * DRM_MODE_RES_MM - Calculates the display size from resolution and DPI * @res: The resolution in pixel * @dpi: The number of dots per inch */ #define DRM_MODE_RES_MM(res, dpi) \ (((res) * 254ul) / ((dpi) * 10ul)) #define __DRM_MODE_INIT(pix, hd, vd, hd_mm, vd_mm) \ .type = DRM_MODE_TYPE_DRIVER, .clock = (pix), \ .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \ .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \ .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \ .height_mm = (vd_mm) /** * DRM_MODE_INIT - Initialize display mode * @hz: Vertical refresh rate in Hertz * @hd: Horizontal resolution, width * @vd: Vertical resolution, height * @hd_mm: Display width in millimeters * @vd_mm: Display height in millimeters * * This macro initializes a &drm_display_mode that contains information about * refresh rate, resolution and physical size. */ #define DRM_MODE_INIT(hz, hd, vd, hd_mm, vd_mm) \ __DRM_MODE_INIT((hd) * (vd) * (hz) / 1000 /* kHz */, hd, vd, hd_mm, vd_mm) /** * DRM_SIMPLE_MODE - Simple display mode * @hd: Horizontal resolution, width * @vd: Vertical resolution, height * @hd_mm: Display width in millimeters * @vd_mm: Display height in millimeters * * This macro initializes a &drm_display_mode that only contains info about * resolution and physical size. */ #define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \ __DRM_MODE_INIT(1 /* pass validation */, hd, vd, hd_mm, vd_mm) #define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ #define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ #define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */ #define CRTC_NO_VSCAN (1 << 3) /* don't adjust doublescan */ #define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN) #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF #define DRM_MODE_MATCH_TIMINGS (1 << 0) #define DRM_MODE_MATCH_CLOCK (1 << 1) #define DRM_MODE_MATCH_FLAGS (1 << 2) #define DRM_MODE_MATCH_3D_FLAGS (1 << 3) #define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4) /** * struct drm_display_mode - DRM kernel-internal display mode structure * @hdisplay: horizontal display size * @hsync_start: horizontal sync start * @hsync_end: horizontal sync end * @htotal: horizontal total size * @hskew: horizontal skew?! * @vdisplay: vertical display size * @vsync_start: vertical sync start * @vsync_end: vertical sync end * @vtotal: vertical total size * @vscan: vertical scan?! 
* @crtc_hdisplay: hardware mode horizontal display size * @crtc_hblank_start: hardware mode horizontal blank start * @crtc_hblank_end: hardware mode horizontal blank end * @crtc_hsync_start: hardware mode horizontal sync start * @crtc_hsync_end: hardware mode horizontal sync end * @crtc_htotal: hardware mode horizontal total size * @crtc_hskew: hardware mode horizontal skew?! * @crtc_vdisplay: hardware mode vertical display size * @crtc_vblank_start: hardware mode vertical blank start * @crtc_vblank_end: hardware mode vertical blank end * @crtc_vsync_start: hardware mode vertical sync start * @crtc_vsync_end: hardware mode vertical sync end * @crtc_vtotal: hardware mode vertical total size * * This is the kernel API display mode information structure. For the * user-space version see struct drm_mode_modeinfo. * * The horizontal and vertical timings are defined per the following diagram. * * :: * * * Active Front Sync Back * Region Porch Porch * <-----------------------><----------------><-------------><--------------> * //////////////////////| * ////////////////////// | * ////////////////////// |.................. ................ * _______________ * <----- [hv]display -----> * <------------- [hv]sync_start ------------> * <--------------------- [hv]sync_end ---------------------> * <-------------------------------- [hv]total ----------------------------->* * * This structure contains two copies of timings. First are the plain timings, * which specify the logical mode, as it would be for a progressive 1:1 scanout * at the refresh rate userspace can observe through vblank timestamps. Then * there's the hardware timings, which are corrected for interlacing, * double-clocking and similar things. They are provided as a convenience, and * can be appropriately computed using drm_mode_set_crtcinfo(). * * For printing you can use %DRM_MODE_FMT and DRM_MODE_ARG(). */ struct drm_display_mode { /** * @clock: * * Pixel clock in kHz. */ int clock; /* in kHz */ u16 hdisplay; u16 hsync_start; u16 hsync_end; u16 htotal; u16 hskew; u16 vdisplay; u16 vsync_start; u16 vsync_end; u16 vtotal; u16 vscan; /** * @flags: * * Sync and timing flags: * * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). * - DRM_MODE_FLAG_BCAST: <deprecated> * - DRM_MODE_FLAG_PIXMUX: <deprecated> * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. * * Additionally there's flags to specify how 3D modes are packed: * * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. * - DRM_MODE_FLAG_3D_L_DEPTH: ? * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom * parts. * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and * right parts. 
*/ u32 flags; /** * @crtc_clock: * * Actual pixel or dot clock in the hardware. This differs from the * logical @clock when e.g. using interlacing, double-clocking, stereo * modes or other fancy stuff that changes the timings and signals * actually sent over the wire. * * This is again in kHz. * * Note that with digital outputs like HDMI or DP there's usually a * massive confusion between the dot clock and the signal clock at the * bit encoding level. Especially when a 8b/10b encoding is used and the * difference is exactly a factor of 10. */ int crtc_clock; u16 crtc_hdisplay; u16 crtc_hblank_start; u16 crtc_hblank_end; u16 crtc_hsync_start; u16 crtc_hsync_end; u16 crtc_htotal; u16 crtc_hskew; u16 crtc_vdisplay; u16 crtc_vblank_start; u16 crtc_vblank_end; u16 crtc_vsync_start; u16 crtc_vsync_end; u16 crtc_vtotal; /** * @width_mm: * * Addressable size of the output in mm, projectors should set this to * 0. */ u16 width_mm; /** * @height_mm: * * Addressable size of the output in mm, projectors should set this to * 0. */ u16 height_mm; /** * @type: * * A bitmask of flags, mostly about the source of a mode. Possible flags * are: * * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native * resolution of an LCD panel. There should only be one preferred * mode per connector at any given time. * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of * them really. Drivers must set this bit for all modes they create * and expose to userspace. * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel * command line. * * Plus a big list of flags which shouldn't be used at all, but are * still around since these flags are also used in the userspace ABI. * We no longer accept modes with these types though: * * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused. * Use DRM_MODE_TYPE_DRIVER instead. * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use * DRM_MODE_TYPE_PREFERRED instead. * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers * which are stuck around for hysterical raisins only. No one has an * idea what they were meant for. Don't use. */ u8 type; /** * @expose_to_userspace: * * Indicates whether the mode is to be exposed to the userspace. * This is to maintain a set of exposed modes while preparing * user-mode's list in drm_mode_getconnector ioctl. The purpose of * this only lies in the ioctl function, and is not to be used * outside the function. */ bool expose_to_userspace; /** * @head: * * struct list_head for mode lists. */ struct list_head head; /** * @name: * * Human-readable name of the mode, filled out with drm_mode_set_name(). */ char name[DRM_DISPLAY_MODE_LEN]; /** * @status: * * Status of the mode, used to filter out modes not supported by the * hardware. See enum &drm_mode_status. */ enum drm_mode_status status; /** * @picture_aspect_ratio: * * Field for setting the HDMI picture aspect ratio of a mode. 
*/ enum hdmi_picture_aspect picture_aspect_ratio; }; /** * DRM_MODE_FMT - printf string for &struct drm_display_mode */ #define DRM_MODE_FMT "\"%s\": %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" /** * DRM_MODE_ARG - printf arguments for &struct drm_display_mode * @m: display mode */ #define DRM_MODE_ARG(m) \ (m)->name, drm_mode_vrefresh(m), (m)->clock, \ (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ (m)->type, (m)->flags #define obj_to_mode(x) container_of(x, struct drm_display_mode, base) /** * drm_mode_is_stereo - check for stereo mode flags * @mode: drm_display_mode to check * * Returns: * True if the mode is one of the stereo modes (like side-by-side), false if * not. */ static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) { return mode->flags & DRM_MODE_FLAG_3D_MASK; } struct drm_connector; struct drm_cmdline_mode; struct drm_display_mode *drm_mode_create(struct drm_device *dev); void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, const struct drm_display_mode *in); int drm_mode_convert_umode(struct drm_device *dev, struct drm_display_mode *out, const struct drm_mode_modeinfo *in); void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); bool drm_mode_is_420_only(const struct drm_display_info *display, const struct drm_display_mode *mode); bool drm_mode_is_420_also(const struct drm_display_info *display, const struct drm_display_mode *mode); bool drm_mode_is_420(const struct drm_display_info *display, const struct drm_display_mode *mode); void drm_set_preferred_mode(struct drm_connector *connector, int hpref, int vpref); struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev, enum drm_connector_tv_mode mode, unsigned long pixel_clock_hz, unsigned int hdisplay, unsigned int vdisplay, bool interlace); static inline struct drm_display_mode *drm_mode_analog_ntsc_480i(struct drm_device *dev) { return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_NTSC, 13500000, 720, 480, true); } static inline struct drm_display_mode *drm_mode_analog_pal_576i(struct drm_device *dev) { return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_PAL, 13500000, 720, 576, true); } struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool reduced, bool interlaced, bool margins); struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool interlaced, int margins); struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool interlaced, int margins, int GTF_M, int GTF_2C, int GTF_K, int GTF_2J); void drm_display_mode_from_videomode(const struct videomode *vm, struct drm_display_mode *dmode); void drm_display_mode_to_videomode(const struct drm_display_mode *dmode, struct videomode *vm); void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags); #if defined(CONFIG_OF) int of_get_drm_display_mode(struct device_node *np, struct drm_display_mode *dmode, u32 *bus_flags, int index); int of_get_drm_panel_display_mode(struct device_node *np, struct drm_display_mode *dmode, u32 *bus_flags); #else static inline int of_get_drm_display_mode(struct device_node *np, struct drm_display_mode *dmode, u32 *bus_flags, int index) { return -EINVAL; } static inline int 
of_get_drm_panel_display_mode(struct device_node *np, struct drm_display_mode *dmode, u32 *bus_flags) { return -EINVAL; } #endif void drm_mode_set_name(struct drm_display_mode *mode); int drm_mode_vrefresh(const struct drm_display_mode *mode); void drm_mode_get_hv_timing(const struct drm_display_mode *mode, int *hdisplay, int *vdisplay); void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags); void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); void drm_mode_init(struct drm_display_mode *dst, const struct drm_display_mode *src); struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); bool drm_mode_match(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2, unsigned int match_flags); bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); /* for use by the crtc helper probe functions */ enum drm_mode_status drm_mode_validate_driver(struct drm_device *dev, const struct drm_display_mode *mode); enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, int maxX, int maxY); enum drm_mode_status drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, struct drm_connector *connector); void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); void drm_connector_list_update(struct drm_connector *connector); /* parsing cmdline modes */ bool drm_mode_parse_command_line_for_connector(const char *mode_option, const struct drm_connector *connector, struct drm_cmdline_mode *mode); struct drm_display_mode * drm_mode_create_from_cmdline_mode(struct drm_device *dev, struct drm_cmdline_mode *cmd); #endif /* __DRM_MODES_H__ */
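DRM_MODE_INIT() above derives the pixel clock from the simplified identity clock_kHz = hdisplay * vdisplay * refresh_Hz / 1000, treating the total raster as equal to the active area (sync start/end and totals all collapse onto the display size). A hedged usage sketch in the style of simple fixed-mode panel drivers; the demo_ names and dimensions are examples only:

#include <drm/drm_modes.h>
#include <drm/drm_print.h>

/* Example only: a fixed 60 Hz 800x600 panel with a 154x86 mm active area.
 * DRM_MODE_INIT() computes .clock = 800 * 600 * 60 / 1000 = 28800 kHz. */
static const struct drm_display_mode demo_panel_mode =
	DRM_MODE_INIT(60, 800, 600, 154, 86);

static void demo_log_mode(struct drm_device *drm)
{
	/* The DRM_MODE_FMT/DRM_MODE_ARG() helpers declared above keep
	 * mode logging uniform across drivers. */
	drm_dbg(drm, "panel mode: " DRM_MODE_FMT "\n",
		DRM_MODE_ARG(&demo_panel_mode));
}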
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-core.c - A Virtual Video Test Driver, core initialization
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/font.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-vid-cap.h"
#include "vivid-vid-out.h"
#include "vivid-radio-common.h"
#include "vivid-radio-rx.h"
#include "vivid-radio-tx.h"
#include "vivid-sdr-cap.h"
#include "vivid-vbi-cap.h"
#include "vivid-vbi-out.h"
#include "vivid-osd.h"
#include "vivid-cec.h"
#include "vivid-ctrls.h"
#include "vivid-meta-cap.h"
#include "vivid-meta-out.h"
#include "vivid-touch-cap.h"

#define VIVID_MODULE_NAME "vivid"
#define MAX_STRING_LENGTH 23

MODULE_DESCRIPTION("Virtual Video Test Driver");
MODULE_AUTHOR("Hans Verkuil");
MODULE_LICENSE("GPL");

unsigned int n_devs = 1;
module_param(n_devs, uint, 0444);
MODULE_PARM_DESC(n_devs, " number of driver instances to create");

static int vid_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(vid_cap_nr, int, NULL, 0444);
MODULE_PARM_DESC(vid_cap_nr, " videoX start number, -1 is autodetect");

static int vid_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(vid_out_nr, int, NULL, 0444);
MODULE_PARM_DESC(vid_out_nr, " videoX start number, -1 is autodetect");

static int vbi_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(vbi_cap_nr, int, NULL, 0444);
MODULE_PARM_DESC(vbi_cap_nr, " vbiX start number, -1 is autodetect");

static int vbi_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(vbi_out_nr, int, NULL, 0444);
MODULE_PARM_DESC(vbi_out_nr, " vbiX start number, -1 is autodetect");

static int sdr_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(sdr_cap_nr, int, NULL, 0444);
MODULE_PARM_DESC(sdr_cap_nr, " swradioX start number, -1 is autodetect");

static int radio_rx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(radio_rx_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_rx_nr, " radioX start number, -1 is autodetect");

static int radio_tx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(radio_tx_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_tx_nr, " radioX start number, -1 is autodetect");

static int meta_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(meta_cap_nr, int, NULL, 0444);
MODULE_PARM_DESC(meta_cap_nr, " videoX start number, -1 is autodetect");

static int meta_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(meta_out_nr, int, NULL, 0444);
MODULE_PARM_DESC(meta_out_nr, " videoX start number, -1 is autodetect");
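/*
 * Usage note (illustrative, not from the original source): each *_nr
 * parameter above is a per-instance array, so a comma-separated module
 * argument assigns one value per driver instance. A hypothetical load of
 * two instances that pins the first video capture node to /dev/video10
 * while letting the second be numbered automatically would be:
 *
 *	modprobe vivid n_devs=2 vid_cap_nr=10,-1
 */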
static int touch_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(touch_cap_nr, int, NULL, 0444);
MODULE_PARM_DESC(touch_cap_nr, " v4l-touchX start number, -1 is autodetect");

static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(ccs_cap_mode, int, NULL, 0444);
MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
			       "\t\t bit 0=crop, 1=compose, 2=scale,\n"
			       "\t\t -1=user-controlled (default)");

static int ccs_out_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(ccs_out_mode, int, NULL, 0444);
MODULE_PARM_DESC(ccs_out_mode, " output crop/compose/scale mode:\n"
			       "\t\t bit 0=crop, 1=compose, 2=scale,\n"
			       "\t\t -1=user-controlled (default)");

static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1 };
module_param_array(multiplanar, uint, NULL, 0444);
MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device.");

/*
 * Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr +
 * vbi-out + vid-out + meta-cap
 */
static unsigned int node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0xe1d3d };
module_param_array(node_types, uint, NULL, 0444);
MODULE_PARM_DESC(node_types, " node types, default is 0xe1d3d. Bitmask with the following meaning:\n"
			     "\t\t bit 0: Video Capture node\n"
			     "\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
			     "\t\t bit 4: Radio Receiver node\n"
			     "\t\t bit 5: Software Defined Radio Receiver node\n"
			     "\t\t bit 8: Video Output node\n"
			     "\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
			     "\t\t bit 12: Radio Transmitter node\n"
#ifdef CONFIG_VIDEO_VIVID_OSD
			     "\t\t bit 16: Framebuffer for testing output overlays\n"
#endif
			     "\t\t bit 17: Metadata Capture node\n"
			     "\t\t bit 18: Metadata Output node\n"
			     "\t\t bit 19: Touch Capture node\n");

/* Default: 4 inputs */
static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
module_param_array(num_inputs, uint, NULL, 0444);
MODULE_PARM_DESC(num_inputs, " number of inputs, default is 4");

/* Default: input 0 = WEBCAM, 1 = TV, 2 = SVID, 3 = HDMI */
static unsigned input_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0xe4 };
module_param_array(input_types, uint, NULL, 0444);
MODULE_PARM_DESC(input_types, " input types, default is 0xe4. Two bits per input,\n"
			      "\t\t bits 0-1 == input 0, bits 31-30 == input 15.\n"
			      "\t\t Type 0 == webcam, 1 == TV, 2 == S-Video, 3 == HDMI");

/* Default: 2 outputs */
static unsigned num_outputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 2 };
module_param_array(num_outputs, uint, NULL, 0444);
MODULE_PARM_DESC(num_outputs, " number of outputs, default is 2");

/* Default: output 0 = SVID, 1 = HDMI */
static unsigned output_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 2 };
module_param_array(output_types, uint, NULL, 0444);
MODULE_PARM_DESC(output_types, " output types, default is 0x02. One bit per output,\n"
			       "\t\t bit 0 == output 0, bit 15 == output 15.\n"
			       "\t\t Type 0 == S-Video, 1 == HDMI");

unsigned vivid_debug;
module_param(vivid_debug, uint, 0644);
MODULE_PARM_DESC(vivid_debug, " activates debug info");

static bool no_error_inj;
module_param(no_error_inj, bool, 0444);
MODULE_PARM_DESC(no_error_inj, " if set disable the error injecting controls");

static unsigned int allocators[VIVID_MAX_DEVS] = { [0 ...
(VIVID_MAX_DEVS - 1)] = 0 }; module_param_array(allocators, uint, NULL, 0444); MODULE_PARM_DESC(allocators, " memory allocator selection, default is 0.\n" "\t\t 0 == vmalloc\n" "\t\t 1 == dma-contig"); static unsigned int cache_hints[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0 }; module_param_array(cache_hints, uint, NULL, 0444); MODULE_PARM_DESC(cache_hints, " user-space cache hints, default is 0.\n" "\t\t 0 == forbid\n" "\t\t 1 == allow"); static unsigned int supports_requests[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1 }; module_param_array(supports_requests, uint, NULL, 0444); MODULE_PARM_DESC(supports_requests, " support for requests, default is 1.\n" "\t\t 0 == no support\n" "\t\t 1 == supports requests\n" "\t\t 2 == requires requests"); struct vivid_dev *vivid_devs[VIVID_MAX_DEVS]; DEFINE_SPINLOCK(hdmi_output_skip_mask_lock); struct workqueue_struct *update_hdmi_ctrls_workqueue; u64 hdmi_to_output_menu_skip_mask; u64 hdmi_input_update_outputs_mask; struct vivid_dev *vivid_ctrl_hdmi_to_output_instance[MAX_MENU_ITEMS]; unsigned int vivid_ctrl_hdmi_to_output_index[MAX_MENU_ITEMS]; char *vivid_ctrl_hdmi_to_output_strings[MAX_MENU_ITEMS + 1] = { "Test Pattern Generator", "None" }; DEFINE_SPINLOCK(svid_output_skip_mask_lock); struct workqueue_struct *update_svid_ctrls_workqueue; u64 svid_to_output_menu_skip_mask; struct vivid_dev *vivid_ctrl_svid_to_output_instance[MAX_MENU_ITEMS]; unsigned int vivid_ctrl_svid_to_output_index[MAX_MENU_ITEMS]; char *vivid_ctrl_svid_to_output_strings[MAX_MENU_ITEMS + 1] = { "Test Pattern Generator", "None" }; const struct v4l2_rect vivid_min_rect = { 0, 0, MIN_WIDTH, MIN_HEIGHT }; const struct v4l2_rect vivid_max_rect = { 0, 0, MAX_WIDTH * MAX_ZOOM, MAX_HEIGHT * MAX_ZOOM }; static const u8 vivid_hdmi_edid[256] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00, 0x22, 0x1a, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78, 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59, 0x45, 0x59, 0x81, 0x80, 0x81, 0x40, 0x90, 0x40, 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x08, 0xe8, 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58, 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18, 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x76, 0x69, 0x76, 0x69, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b, 0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f, 0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21, 0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x21, 0x00, 0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4, 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3, 0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d, 0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e, 0x1a, 0x36, 0x80, 0xa0, 0x70, 0x38, 0x1f, 0x40, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51, 0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, }; static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vivid_dev *dev = video_drvdata(file); strscpy(cap->driver, "vivid", sizeof(cap->driver)); strscpy(cap->card, "vivid", 
		sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s-%03d", VIVID_MODULE_NAME, dev->inst);
	cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
		dev->vbi_cap_caps | dev->vbi_out_caps |
		dev->radio_rx_caps | dev->radio_tx_caps |
		dev->sdr_cap_caps | dev->meta_cap_caps |
		dev->meta_out_caps | dev->touch_cap_caps |
		V4L2_CAP_DEVICE_CAPS;
	return 0;
}

static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
				 const struct v4l2_hw_freq_seek *a)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_type == VFL_TYPE_RADIO)
		return vivid_radio_rx_s_hw_freq_seek(file, priv, a);
	return -ENOTTY;
}

static int vidioc_enum_freq_bands(struct file *file, void *priv,
				  struct v4l2_frequency_band *band)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_type == VFL_TYPE_RADIO)
		return vivid_radio_rx_enum_freq_bands(file, priv, band);
	if (vdev->vfl_type == VFL_TYPE_SDR)
		return vivid_sdr_enum_freq_bands(file, priv, band);
	return -ENOTTY;
}

static int vidioc_g_tuner(struct file *file, void *priv,
			  struct v4l2_tuner *vt)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_type == VFL_TYPE_RADIO)
		return vivid_radio_rx_g_tuner(file, priv, vt);
	if (vdev->vfl_type == VFL_TYPE_SDR)
		return vivid_sdr_g_tuner(file, priv, vt);
	return vivid_video_g_tuner(file, priv, vt);
}

static int vidioc_s_tuner(struct file *file, void *priv,
			  const struct v4l2_tuner *vt)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_type == VFL_TYPE_RADIO)
		return vivid_radio_rx_s_tuner(file, priv, vt);
	if (vdev->vfl_type == VFL_TYPE_SDR)
		return vivid_sdr_s_tuner(file, priv, vt);
	return vivid_video_s_tuner(file, priv, vt);
}

static int vidioc_g_frequency(struct file *file, void *priv,
			      struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_type == VFL_TYPE_RADIO)
		return vivid_radio_g_frequency(file,
			vdev->vfl_dir == VFL_DIR_RX ?
			&dev->radio_rx_freq : &dev->radio_tx_freq, vf);
	if (vdev->vfl_type == VFL_TYPE_SDR)
		return vivid_sdr_g_frequency(file, priv, vf);
	return vivid_video_g_frequency(file, priv, vf);
}
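/*
 * Background note on the pattern above and below (descriptive, not from the
 * original file): vivid registers radio, SDR, video, VBI, metadata and touch
 * nodes that all share the single vivid_ioctl_ops table defined further down.
 * Each wrapper therefore inspects video_devdata(file)->vfl_type (and vfl_dir
 * for the receive/transmit split) to route the call to the matching per-node
 * implementation, falling back to -ENOTTY where a node type has no handler.
 */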
static int vidioc_s_frequency(struct file *file, void *priv,
			      const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_type == VFL_TYPE_RADIO)
		return vivid_radio_s_frequency(file,
			vdev->vfl_dir == VFL_DIR_RX ?
			&dev->radio_rx_freq : &dev->radio_tx_freq, vf);
	if (vdev->vfl_type == VFL_TYPE_SDR)
		return vivid_sdr_s_frequency(file, priv, vf);
	return vivid_video_s_frequency(file, priv, vf);
}

static int vidioc_overlay(struct file *file, void *priv, unsigned i)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return -ENOTTY;
	return vivid_vid_out_overlay(file, priv, i);
}

static int vidioc_g_fbuf(struct file *file, void *priv,
			 struct v4l2_framebuffer *a)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return -ENOTTY;
	return vivid_vid_out_g_fbuf(file, priv, a);
}

static int vidioc_s_fbuf(struct file *file, void *priv,
			 const struct v4l2_framebuffer *a)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return -ENOTTY;
	return vivid_vid_out_s_fbuf(file, priv, a);
}

static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return vivid_vid_cap_s_std(file, priv, id);
	return vivid_vid_out_s_std(file, priv, id);
}

static int vidioc_s_dv_timings(struct file *file, void *priv,
			       struct v4l2_dv_timings *timings)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return vivid_vid_cap_s_dv_timings(file, priv, timings);
	return vivid_vid_out_s_dv_timings(file, priv, timings);
}

static int vidioc_g_pixelaspect(struct file *file, void *priv,
				int type, struct v4l2_fract *f)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return vivid_vid_cap_g_pixelaspect(file, priv, type, f);
	return vivid_vid_out_g_pixelaspect(file, priv, type, f);
}

static int vidioc_g_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return vivid_vid_cap_g_selection(file, priv, sel);
	return vivid_vid_out_g_selection(file, priv, sel);
}

static int vidioc_s_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return vivid_vid_cap_s_selection(file, priv, sel);
	return vivid_vid_out_s_selection(file, priv, sel);
}

static int vidioc_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_type == VFL_TYPE_TOUCH)
		return vivid_g_parm_tch(file, priv, parm);
	if (vdev->vfl_dir == VFL_DIR_RX)
		return vivid_vid_cap_g_parm(file, priv, parm);
	return vivid_vid_out_g_parm(file, priv, parm);
}

static int vidioc_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_RX)
		return vivid_vid_cap_s_parm(file, priv, parm);
	return -ENOTTY;
}

static int vidioc_log_status(struct file *file, void *priv)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct video_device *vdev = video_devdata(file);

	v4l2_ctrl_log_status(file, priv);
	if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_VIDEO)
		tpg_log_status(&dev->tpg);
	return 0;
}

static ssize_t vivid_radio_read(struct file *file, char __user *buf,
				size_t size, loff_t *offset)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->vfl_dir == VFL_DIR_TX)
		return -EINVAL;
	return vivid_radio_rx_read(file, buf, size, offset);
}

static ssize_t vivid_radio_write(struct file *file, const char __user *buf,
				 size_t size, loff_t *offset)
{
	struct video_device *vdev = video_devdata(file);

	if
(vdev->vfl_dir == VFL_DIR_RX) return -EINVAL; return vivid_radio_tx_write(file, buf, size, offset); } static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wait) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_radio_rx_poll(file, wait); return vivid_radio_tx_poll(file, wait); } static int vivid_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_enum_input_tch(file, priv, inp); return vidioc_enum_input(file, priv, inp); } static int vivid_g_input(struct file *file, void *priv, unsigned int *i) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_input_tch(file, priv, i); return vidioc_g_input(file, priv, i); } static int vivid_s_input(struct file *file, void *priv, unsigned int i) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_s_input_tch(file, priv, i); return vidioc_s_input(file, priv, i); } static int vivid_enum_fmt_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_enum_fmt_tch(file, priv, f); return vivid_enum_fmt_vid(file, priv, f); } static int vivid_g_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch(file, priv, f); return vidioc_g_fmt_vid_cap(file, priv, f); } static int vivid_try_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch(file, priv, f); return vidioc_try_fmt_vid_cap(file, priv, f); } static int vivid_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch(file, priv, f); return vidioc_s_fmt_vid_cap(file, priv, f); } static int vivid_g_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch_mplane(file, priv, f); return vidioc_g_fmt_vid_cap_mplane(file, priv, f); } static int vivid_try_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch_mplane(file, priv, f); return vidioc_try_fmt_vid_cap_mplane(file, priv, f); } static int vivid_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch_mplane(file, priv, f); return vidioc_s_fmt_vid_cap_mplane(file, priv, f); } static bool vivid_is_in_use(bool valid, struct video_device *vdev) { unsigned long flags; bool res; if (!valid) return false; spin_lock_irqsave(&vdev->fh_lock, flags); res = !list_empty(&vdev->fh_list); spin_unlock_irqrestore(&vdev->fh_lock, flags); return res; } static bool vivid_is_last_user(struct vivid_dev *dev) { unsigned int uses = vivid_is_in_use(dev->has_vid_cap, &dev->vid_cap_dev) + vivid_is_in_use(dev->has_vid_out, &dev->vid_out_dev) + vivid_is_in_use(dev->has_vbi_cap, &dev->vbi_cap_dev) + vivid_is_in_use(dev->has_vbi_out, &dev->vbi_out_dev) + 
vivid_is_in_use(dev->has_radio_rx, &dev->radio_rx_dev) + vivid_is_in_use(dev->has_radio_tx, &dev->radio_tx_dev) + vivid_is_in_use(dev->has_sdr_cap, &dev->sdr_cap_dev) + vivid_is_in_use(dev->has_meta_cap, &dev->meta_cap_dev) + vivid_is_in_use(dev->has_meta_out, &dev->meta_out_dev) + vivid_is_in_use(dev->has_touch_cap, &dev->touch_cap_dev); return uses == 1; } static void vivid_reconnect(struct vivid_dev *dev) { if (dev->has_vid_cap) set_bit(V4L2_FL_REGISTERED, &dev->vid_cap_dev.flags); if (dev->has_vid_out) set_bit(V4L2_FL_REGISTERED, &dev->vid_out_dev.flags); if (dev->has_vbi_cap) set_bit(V4L2_FL_REGISTERED, &dev->vbi_cap_dev.flags); if (dev->has_vbi_out) set_bit(V4L2_FL_REGISTERED, &dev->vbi_out_dev.flags); if (dev->has_radio_rx) set_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags); if (dev->has_radio_tx) set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags); if (dev->has_sdr_cap) set_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags); if (dev->has_meta_cap) set_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags); if (dev->has_meta_out) set_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags); if (dev->has_touch_cap) set_bit(V4L2_FL_REGISTERED, &dev->touch_cap_dev.flags); dev->disconnect_error = false; } static int vivid_fop_release(struct file *file) { struct vivid_dev *dev = video_drvdata(file); struct video_device *vdev = video_devdata(file); mutex_lock(&dev->mutex); if (!no_error_inj && v4l2_fh_is_singular_file(file) && dev->disconnect_error && !video_is_registered(vdev) && vivid_is_last_user(dev)) { /* * I am the last user of this driver, and a disconnect * was forced (since this video_device is unregistered), * so re-register all video_device's again. */ v4l2_info(&dev->v4l2_dev, "reconnect\n"); vivid_reconnect(dev); } if (file_to_v4l2_fh(file) == dev->radio_rx_rds_owner) { dev->radio_rx_rds_last_block = 0; dev->radio_rx_rds_owner = NULL; } if (file_to_v4l2_fh(file) == dev->radio_tx_rds_owner) { dev->radio_tx_rds_last_block = 0; dev->radio_tx_rds_owner = NULL; } mutex_unlock(&dev->mutex); if (vdev->queue) return vb2_fop_release(file); return v4l2_fh_release(file); } static const struct v4l2_file_operations vivid_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vivid_fop_release, .read = vb2_fop_read, .write = vb2_fop_write, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, }; static const struct v4l2_file_operations vivid_radio_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vivid_fop_release, .read = vivid_radio_read, .write = vivid_radio_write, .poll = vivid_radio_poll, .unlocked_ioctl = video_ioctl2, }; static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct video_device *vdev = video_devdata(file); int r; /* * Sliced and raw VBI capture share the same queue so we must * change the type. */ if (p->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE || p->type == V4L2_BUF_TYPE_VBI_CAPTURE) { r = vb2_queue_change_type(vdev->queue, p->type); if (r) return r; } return vb2_ioctl_reqbufs(file, priv, p); } static int vidioc_create_bufs(struct file *file, void *priv, struct v4l2_create_buffers *p) { struct video_device *vdev = video_devdata(file); int r; /* * Sliced and raw VBI capture share the same queue so we must * change the type. 
*/ if (p->format.type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE || p->format.type == V4L2_BUF_TYPE_VBI_CAPTURE) { r = vb2_queue_change_type(vdev->queue, p->format.type); if (r) return r; } return vb2_ioctl_create_bufs(file, priv, p); } static const struct v4l2_ioctl_ops vivid_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vivid_enum_fmt_cap, .vidioc_g_fmt_vid_cap = vivid_g_fmt_cap, .vidioc_try_fmt_vid_cap = vivid_try_fmt_cap, .vidioc_s_fmt_vid_cap = vivid_s_fmt_cap, .vidioc_g_fmt_vid_cap_mplane = vivid_g_fmt_cap_mplane, .vidioc_try_fmt_vid_cap_mplane = vivid_try_fmt_cap_mplane, .vidioc_s_fmt_vid_cap_mplane = vivid_s_fmt_cap_mplane, .vidioc_enum_fmt_vid_out = vivid_enum_fmt_vid, .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out, .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out_mplane, .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane, .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out_mplane, .vidioc_g_selection = vidioc_g_selection, .vidioc_s_selection = vidioc_s_selection, .vidioc_g_pixelaspect = vidioc_g_pixelaspect, .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap, .vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap, .vidioc_try_fmt_sliced_vbi_cap = vidioc_try_fmt_sliced_vbi_cap, .vidioc_s_fmt_sliced_vbi_cap = vidioc_s_fmt_sliced_vbi_cap, .vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap, .vidioc_g_fmt_vbi_out = vidioc_g_fmt_vbi_out, .vidioc_try_fmt_vbi_out = vidioc_g_fmt_vbi_out, .vidioc_s_fmt_vbi_out = vidioc_s_fmt_vbi_out, .vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out, .vidioc_try_fmt_sliced_vbi_out = vidioc_try_fmt_sliced_vbi_out, .vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out, .vidioc_enum_fmt_sdr_cap = vidioc_enum_fmt_sdr_cap, .vidioc_g_fmt_sdr_cap = vidioc_g_fmt_sdr_cap, .vidioc_try_fmt_sdr_cap = vidioc_try_fmt_sdr_cap, .vidioc_s_fmt_sdr_cap = vidioc_s_fmt_sdr_cap, .vidioc_overlay = vidioc_overlay, .vidioc_enum_framesizes = vidioc_enum_framesizes, .vidioc_enum_frameintervals = vidioc_enum_frameintervals, .vidioc_g_parm = vidioc_g_parm, .vidioc_s_parm = vidioc_s_parm, .vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_out_overlay, .vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_out_overlay, .vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_out_overlay, .vidioc_g_fbuf = vidioc_g_fbuf, .vidioc_s_fbuf = vidioc_s_fbuf, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_create_bufs = vidioc_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_remove_bufs = vb2_ioctl_remove_bufs, .vidioc_enum_input = vivid_enum_input, .vidioc_g_input = vivid_g_input, .vidioc_s_input = vivid_s_input, .vidioc_s_audio = vidioc_s_audio, .vidioc_g_audio = vidioc_g_audio, .vidioc_enumaudio = vidioc_enumaudio, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_modulator = vidioc_s_modulator, .vidioc_g_modulator = vidioc_g_modulator, .vidioc_s_hw_freq_seek = vidioc_s_hw_freq_seek, .vidioc_enum_freq_bands = vidioc_enum_freq_bands, .vidioc_enum_output = vidioc_enum_output, .vidioc_g_output = vidioc_g_output, .vidioc_s_output = 
vidioc_s_output, .vidioc_s_audout = vidioc_s_audout, .vidioc_g_audout = vidioc_g_audout, .vidioc_enumaudout = vidioc_enumaudout, .vidioc_querystd = vidioc_querystd, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_s_dv_timings = vidioc_s_dv_timings, .vidioc_g_dv_timings = vidioc_g_dv_timings, .vidioc_query_dv_timings = vidioc_query_dv_timings, .vidioc_enum_dv_timings = vidioc_enum_dv_timings, .vidioc_dv_timings_cap = vidioc_dv_timings_cap, .vidioc_g_edid = vidioc_g_edid, .vidioc_s_edid = vidioc_s_edid, .vidioc_log_status = vidioc_log_status, .vidioc_subscribe_event = vidioc_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, .vidioc_enum_fmt_meta_cap = vidioc_enum_fmt_meta_cap, .vidioc_g_fmt_meta_cap = vidioc_g_fmt_meta_cap, .vidioc_s_fmt_meta_cap = vidioc_g_fmt_meta_cap, .vidioc_try_fmt_meta_cap = vidioc_g_fmt_meta_cap, .vidioc_enum_fmt_meta_out = vidioc_enum_fmt_meta_out, .vidioc_g_fmt_meta_out = vidioc_g_fmt_meta_out, .vidioc_s_fmt_meta_out = vidioc_g_fmt_meta_out, .vidioc_try_fmt_meta_out = vidioc_g_fmt_meta_out, }; /* ----------------------------------------------------------------- Initialization and module stuff ------------------------------------------------------------------*/ static void vivid_dev_release(struct v4l2_device *v4l2_dev) { struct vivid_dev *dev = container_of(v4l2_dev, struct vivid_dev, v4l2_dev); cancel_work_sync(&dev->update_hdmi_ctrl_work); vivid_free_controls(dev); v4l2_device_unregister(&dev->v4l2_dev); #ifdef CONFIG_MEDIA_CONTROLLER media_device_cleanup(&dev->mdev); #endif vfree(dev->scaled_line); vfree(dev->blended_line); vfree(dev->edid); tpg_free(&dev->tpg); kfree(dev->query_dv_timings_qmenu); kfree(dev->query_dv_timings_qmenu_strings); kfree(dev); } #ifdef CONFIG_MEDIA_CONTROLLER static int vivid_req_validate(struct media_request *req) { struct vivid_dev *dev = container_of(req->mdev, struct vivid_dev, mdev); if (dev->req_validate_error) { dev->req_validate_error = false; return -EINVAL; } return vb2_request_validate(req); } static const struct media_device_ops vivid_media_ops = { .req_validate = vivid_req_validate, .req_queue = vb2_request_queue, }; #endif static int vivid_create_queue(struct vivid_dev *dev, struct vb2_queue *q, u32 buf_type, unsigned int min_reqbufs_allocation, const struct vb2_ops *ops) { if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->multiplanar) buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; else if (buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT && dev->multiplanar) buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; else if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE && !dev->has_raw_vbi_cap) buf_type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; else if (buf_type == V4L2_BUF_TYPE_VBI_OUTPUT && !dev->has_raw_vbi_out) buf_type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT; q->type = buf_type; q->io_modes = VB2_MMAP | VB2_DMABUF; q->io_modes |= V4L2_TYPE_IS_OUTPUT(buf_type) ? VB2_WRITE : VB2_READ; /* * The maximum number of buffers is 32768 if PAGE_SHIFT == 12, * see also MAX_BUFFER_INDEX in videobuf2-core.c. It will be less if * PAGE_SHIFT > 12, but then max_num_buffers will be clamped by * videobuf2-core.c to MAX_BUFFER_INDEX. 
	 */
	if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		q->max_num_buffers = MAX_VID_CAP_BUFFERS;
	if (buf_type == V4L2_BUF_TYPE_SDR_CAPTURE)
		q->max_num_buffers = 1024;
	if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE)
		q->max_num_buffers = 32768;
	if (allocators[dev->inst] != 1)
		q->io_modes |= VB2_USERPTR;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct vivid_buffer);
	q->ops = ops;
	q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
						  &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_reqbufs_allocation = min_reqbufs_allocation;
	q->lock = &dev->mutex;
	q->dev = dev->v4l2_dev.dev;
	q->supports_requests = supports_requests[dev->inst];
	q->requires_requests = supports_requests[dev->inst] >= 2;
	q->allow_cache_hints = (cache_hints[dev->inst] == 1);

	return vb2_queue_init(q);
}

static int vivid_detect_feature_set(struct vivid_dev *dev, int inst,
				    unsigned node_type,
				    bool *has_tuner, bool *has_modulator,
				    int *ccs_cap, int *ccs_out,
				    unsigned in_type_counter[4],
				    unsigned out_type_counter[4])
{
	int i;

	/* do we use single- or multi-planar? */
	dev->multiplanar = multiplanar[inst] > 1;
	v4l2_info(&dev->v4l2_dev, "using %splanar format API\n",
		  dev->multiplanar ? "multi" : "single ");

	/* how many inputs do we have and of what type? */
	dev->num_inputs = num_inputs[inst];
	if (node_type & 0x20007) {
		if (dev->num_inputs < 1)
			dev->num_inputs = 1;
	} else {
		dev->num_inputs = 0;
	}
	if (dev->num_inputs >= MAX_INPUTS)
		dev->num_inputs = MAX_INPUTS;
	for (i = 0; i < dev->num_inputs; i++) {
		dev->input_type[i] = (input_types[inst] >> (i * 2)) & 0x3;
		dev->input_name_counter[i] = in_type_counter[dev->input_type[i]]++;
	}
	dev->has_audio_inputs = in_type_counter[TV] && in_type_counter[SVID];
	if (in_type_counter[HDMI] == 16) {
		/* The CEC physical address only allows for max 15 inputs */
		in_type_counter[HDMI]--;
		dev->num_inputs--;
	}
	dev->num_hdmi_inputs = in_type_counter[HDMI];
	dev->num_svid_inputs = in_type_counter[SVID];

	/* how many outputs do we have and of what type? */
	dev->num_outputs = num_outputs[inst];
	if (node_type & 0x40300) {
		if (dev->num_outputs < 1)
			dev->num_outputs = 1;
	} else {
		dev->num_outputs = 0;
	}
	if (dev->num_outputs >= MAX_OUTPUTS)
		dev->num_outputs = MAX_OUTPUTS;
	for (i = 0; i < dev->num_outputs; i++) {
		dev->output_type[i] = ((output_types[inst] >> i) & 1) ? HDMI : SVID;
		dev->output_name_counter[i] = out_type_counter[dev->output_type[i]]++;
	}
	dev->has_audio_outputs = out_type_counter[SVID];
	if (out_type_counter[HDMI] == 16) {
		/*
		 * The CEC physical address only allows for max 15 inputs,
		 * so outputs are also limited to 15 to allow for easy
		 * CEC output to input mapping.
		 */
		out_type_counter[HDMI]--;
		dev->num_outputs--;
	}
	dev->num_hdmi_outputs = out_type_counter[HDMI];

	/* do we create a video capture device? */
	dev->has_vid_cap = node_type & 0x0001;
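	/*
	 * Illustrative decode (the masks are the ones tested in this
	 * function): with the default node_types value of 0xe1d3d the
	 * individual feature bits work out to
	 *
	 *	0xe1d3d & 0x00001 -> video capture
	 *	0xe1d3d & 0x0000c -> raw + sliced VBI capture
	 *	0xe1d3d & 0x00030 -> radio receiver + SDR capture
	 *	0xe1d3d & 0x00100 -> video output
	 *	0xe1d3d & 0x00c00 -> raw + sliced VBI output
	 *	0xe1d3d & 0x01000 -> radio transmitter
	 *	0xe1d3d & 0xe0000 -> metadata capture/output + touch capture
	 */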
	/* do we create a vbi capture device? */
	if (in_type_counter[TV] || in_type_counter[SVID]) {
		dev->has_raw_vbi_cap = node_type & 0x0004;
		dev->has_sliced_vbi_cap = node_type & 0x0008;
		dev->has_vbi_cap = dev->has_raw_vbi_cap | dev->has_sliced_vbi_cap;
	}

	/* do we create a meta capture device */
	dev->has_meta_cap = node_type & 0x20000;

	/* sanity checks */
	if ((in_type_counter[WEBCAM] || in_type_counter[HDMI]) &&
	    !dev->has_vid_cap && !dev->has_meta_cap) {
		v4l2_warn(&dev->v4l2_dev,
			  "Webcam or HDMI input without video or metadata nodes\n");
		return -EINVAL;
	}
	if ((in_type_counter[TV] || in_type_counter[SVID]) &&
	    !dev->has_vid_cap && !dev->has_vbi_cap && !dev->has_meta_cap) {
		v4l2_warn(&dev->v4l2_dev,
			  "TV or S-Video input without video, VBI or metadata nodes\n");
		return -EINVAL;
	}

	/* do we create a video output device? */
	dev->has_vid_out = node_type & 0x0100;

	/* do we create a vbi output device? */
	if (out_type_counter[SVID]) {
		dev->has_raw_vbi_out = node_type & 0x0400;
		dev->has_sliced_vbi_out = node_type & 0x0800;
		dev->has_vbi_out = dev->has_raw_vbi_out | dev->has_sliced_vbi_out;
	}

	/* do we create a metadata output device */
	dev->has_meta_out = node_type & 0x40000;

	/* sanity checks */
	if (out_type_counter[SVID] &&
	    !dev->has_vid_out && !dev->has_vbi_out && !dev->has_meta_out) {
		v4l2_warn(&dev->v4l2_dev,
			  "S-Video output without video, VBI or metadata nodes\n");
		return -EINVAL;
	}
	if (out_type_counter[HDMI] && !dev->has_vid_out && !dev->has_meta_out) {
		v4l2_warn(&dev->v4l2_dev,
			  "HDMI output without video or metadata nodes\n");
		return -EINVAL;
	}

	/* do we create a radio receiver device? */
	dev->has_radio_rx = node_type & 0x0010;

	/* do we create a radio transmitter device? */
	dev->has_radio_tx = node_type & 0x1000;

	/* do we create a software defined radio capture device? */
	dev->has_sdr_cap = node_type & 0x0020;

	/* do we have a TV tuner? */
	dev->has_tv_tuner = in_type_counter[TV];

	/* do we have a tuner? */
	*has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
		     dev->has_radio_rx || dev->has_sdr_cap;

	/* do we have a modulator? */
	*has_modulator = dev->has_radio_tx;

#ifdef CONFIG_VIDEO_VIVID_OSD
	if (dev->has_vid_cap)
		/* do we have a framebuffer for overlay testing? */
		dev->has_fb = node_type & 0x10000;
#endif

	/* can we do crop/compose/scaling while capturing? */
	if (no_error_inj && *ccs_cap == -1)
		*ccs_cap = 7;

	/* if ccs_cap == -1, then the user can select it using controls */
	if (*ccs_cap != -1) {
		dev->has_crop_cap = *ccs_cap & 1;
		dev->has_compose_cap = *ccs_cap & 2;
		dev->has_scaler_cap = *ccs_cap & 4;
		v4l2_info(&dev->v4l2_dev, "Capture Crop: %c Compose: %c Scaler: %c\n",
			  dev->has_crop_cap ? 'Y' : 'N',
			  dev->has_compose_cap ? 'Y' : 'N',
			  dev->has_scaler_cap ? 'Y' : 'N');
	}

	/* can we do crop/compose/scaling with video output? */
	if (no_error_inj && *ccs_out == -1)
		*ccs_out = 7;

	/* if ccs_out == -1, then the user can select it using controls */
	if (*ccs_out != -1) {
		dev->has_crop_out = *ccs_out & 1;
		dev->has_compose_out = *ccs_out & 2;
		dev->has_scaler_out = *ccs_out & 4;
		v4l2_info(&dev->v4l2_dev, "Output Crop: %c Compose: %c Scaler: %c\n",
			  dev->has_crop_out ? 'Y' : 'N',
			  dev->has_compose_out ? 'Y' : 'N',
			  dev->has_scaler_out ? 'Y' : 'N');
	}

	/* do we create a touch capture device */
	dev->has_touch_cap = node_type & 0x80000;

	return 0;
}
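/*
 * Illustrative user-space view (hypothetical example, not part of the
 * driver): the per-node caps assembled in vivid_set_capabilities() below
 * are what applications see through VIDIOC_QUERYCAP, e.g.:
 *
 *	struct v4l2_capability cap;
 *	int fd = open("/dev/video0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0 &&
 *	    (cap.device_caps & V4L2_CAP_STREAMING))
 *		printf("%s supports streaming I/O\n", cap.card);
 */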
static void vivid_set_capabilities(struct vivid_dev *dev)
{
	if (dev->has_vid_cap) {
		/* set up the capabilities of the video capture device */
		dev->vid_cap_caps = dev->multiplanar ?
			V4L2_CAP_VIDEO_CAPTURE_MPLANE :
			V4L2_CAP_VIDEO_CAPTURE;
		dev->vid_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
		if (dev->has_audio_inputs)
			dev->vid_cap_caps |= V4L2_CAP_AUDIO;
		if (dev->has_tv_tuner)
			dev->vid_cap_caps |= V4L2_CAP_TUNER;
	}
	if (dev->has_vid_out) {
		/* set up the capabilities of the video output device */
		dev->vid_out_caps = dev->multiplanar ?
			V4L2_CAP_VIDEO_OUTPUT_MPLANE :
			V4L2_CAP_VIDEO_OUTPUT;
		if (dev->has_fb)
			dev->vid_out_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
		dev->vid_out_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
		if (dev->has_audio_outputs)
			dev->vid_out_caps |= V4L2_CAP_AUDIO;
	}
	if (dev->has_vbi_cap) {
		/* set up the capabilities of the vbi capture device */
		dev->vbi_cap_caps = (dev->has_raw_vbi_cap ? V4L2_CAP_VBI_CAPTURE : 0) |
				    (dev->has_sliced_vbi_cap ? V4L2_CAP_SLICED_VBI_CAPTURE : 0);
		dev->vbi_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
		if (dev->has_audio_inputs)
			dev->vbi_cap_caps |= V4L2_CAP_AUDIO;
		if (dev->has_tv_tuner)
			dev->vbi_cap_caps |= V4L2_CAP_TUNER;
	}
	if (dev->has_vbi_out) {
		/* set up the capabilities of the vbi output device */
		dev->vbi_out_caps = (dev->has_raw_vbi_out ? V4L2_CAP_VBI_OUTPUT : 0) |
				    (dev->has_sliced_vbi_out ? V4L2_CAP_SLICED_VBI_OUTPUT : 0);
		dev->vbi_out_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
		if (dev->has_audio_outputs)
			dev->vbi_out_caps |= V4L2_CAP_AUDIO;
	}
	if (dev->has_sdr_cap) {
		/* set up the capabilities of the sdr capture device */
		dev->sdr_cap_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER;
		dev->sdr_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
	}
	/* set up the capabilities of the radio receiver device */
	if (dev->has_radio_rx)
		dev->radio_rx_caps = V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE |
				     V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
				     V4L2_CAP_READWRITE;
	/* set up the capabilities of the radio transmitter device */
	if (dev->has_radio_tx)
		dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
				     V4L2_CAP_READWRITE;
	/* set up the capabilities of meta capture device */
	if (dev->has_meta_cap) {
		dev->meta_cap_caps = V4L2_CAP_META_CAPTURE |
				     V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
		if (dev->has_audio_inputs)
			dev->meta_cap_caps |= V4L2_CAP_AUDIO;
		if (dev->has_tv_tuner)
			dev->meta_cap_caps |= V4L2_CAP_TUNER;
	}
	/* set up the capabilities of meta output device */
	if (dev->has_meta_out) {
		dev->meta_out_caps = V4L2_CAP_META_OUTPUT |
				     V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
		if (dev->has_audio_outputs)
			dev->meta_out_caps |= V4L2_CAP_AUDIO;
	}
	/* set up the capabilities of the touch capture device */
	if (dev->has_touch_cap) {
		dev->touch_cap_caps = V4L2_CAP_TOUCH | V4L2_CAP_STREAMING |
				      V4L2_CAP_READWRITE;
		dev->touch_cap_caps |= dev->multiplanar ?
V4L2_CAP_VIDEO_CAPTURE_MPLANE : V4L2_CAP_VIDEO_CAPTURE; } } static void vivid_disable_unused_ioctls(struct vivid_dev *dev, bool has_tuner, bool has_modulator, unsigned in_type_counter[4], unsigned out_type_counter[4]) { /* disable invalid ioctls based on the feature set */ if (!dev->has_audio_inputs) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_AUDIO); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_AUDIO); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUMAUDIO); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_AUDIO); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_AUDIO); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_ENUMAUDIO); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_AUDIO); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_AUDIO); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_ENUMAUDIO); } if (!dev->has_audio_outputs) { v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_AUDOUT); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_AUDOUT); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUMAUDOUT); v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_AUDOUT); v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_AUDOUT); v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_ENUMAUDOUT); v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_AUDOUT); v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_AUDOUT); v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_ENUMAUDOUT); } if (!in_type_counter[TV] && !in_type_counter[SVID]) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_STD); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_STD); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUMSTD); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_QUERYSTD); } if (!out_type_counter[SVID]) { v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_STD); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_STD); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUMSTD); } if (!has_tuner && !has_modulator) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_FREQUENCY); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_FREQUENCY); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_FREQUENCY); } if (!has_tuner) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_TUNER); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_TUNER); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_TUNER); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_TUNER); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_TUNER); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_TUNER); } if (in_type_counter[HDMI] == 0) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_EDID); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_EDID); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_DV_TIMINGS_CAP); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUM_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_QUERY_DV_TIMINGS); } if (out_type_counter[HDMI] == 0) { v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_EDID); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_DV_TIMINGS_CAP); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_DV_TIMINGS); } if (!dev->has_fb) { v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FBUF); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FBUF); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_OVERLAY); } 
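	/*
	 * Background note (descriptive, not from the original file): because
	 * all nodes share one v4l2_ioctl_ops table, ioctls that make no sense
	 * for a given node are masked here instead. v4l2_disable_ioctl()
	 * marks the ioctl as unsupported in the video_device before
	 * video_register_device() is called, so the V4L2 core rejects it with
	 * ENOTTY without ever entering the driver. The remaining calls below
	 * unconditionally drop ioctls that vivid never implements for these
	 * node types.
	 */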
	v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
	v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
	v4l2_disable_ioctl(&dev->sdr_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
	v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
	v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FREQUENCY);
	v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FREQUENCY);
	v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMESIZES);
	v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMEINTERVALS);
	v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_FREQUENCY);
	v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
	v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_FREQUENCY);
	v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_FREQUENCY);
	v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_S_PARM);
	v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMESIZES);
	v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMEINTERVALS);
}
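/*
 * Worked example for the preset-name format used in vivid_init_dv_timings()
 * below (values taken from the standard CTA-861 1080p60 timing, shown purely
 * for illustration): width 1920, height 1080, progressive, pixelclock
 * 148500000 and a total frame of 2200x1125, so the generated menu string is
 * "1920x1080p" followed by 148500000 / (2200 * 1125) = 60, i.e.
 * "1920x1080p60".
 */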
"i" : "p", (u32)bt->pixelclock / (htot * vtot)); } return 0; } static int vivid_create_queues(struct vivid_dev *dev) { int ret; /* start creating the vb2 queues */ if (dev->has_vid_cap) { /* initialize vid_cap queue */ ret = vivid_create_queue(dev, &dev->vb_vid_cap_q, V4L2_BUF_TYPE_VIDEO_CAPTURE, 2, &vivid_vid_cap_qops); if (ret) return ret; } if (dev->has_vid_out) { /* initialize vid_out queue */ ret = vivid_create_queue(dev, &dev->vb_vid_out_q, V4L2_BUF_TYPE_VIDEO_OUTPUT, 2, &vivid_vid_out_qops); if (ret) return ret; } if (dev->has_vbi_cap) { /* initialize vbi_cap queue */ ret = vivid_create_queue(dev, &dev->vb_vbi_cap_q, V4L2_BUF_TYPE_VBI_CAPTURE, 2, &vivid_vbi_cap_qops); if (ret) return ret; } if (dev->has_vbi_out) { /* initialize vbi_out queue */ ret = vivid_create_queue(dev, &dev->vb_vbi_out_q, V4L2_BUF_TYPE_VBI_OUTPUT, 2, &vivid_vbi_out_qops); if (ret) return ret; } if (dev->has_sdr_cap) { /* initialize sdr_cap queue */ ret = vivid_create_queue(dev, &dev->vb_sdr_cap_q, V4L2_BUF_TYPE_SDR_CAPTURE, 8, &vivid_sdr_cap_qops); if (ret) return ret; } if (dev->has_meta_cap) { /* initialize meta_cap queue */ ret = vivid_create_queue(dev, &dev->vb_meta_cap_q, V4L2_BUF_TYPE_META_CAPTURE, 2, &vivid_meta_cap_qops); if (ret) return ret; } if (dev->has_meta_out) { /* initialize meta_out queue */ ret = vivid_create_queue(dev, &dev->vb_meta_out_q, V4L2_BUF_TYPE_META_OUTPUT, 2, &vivid_meta_out_qops); if (ret) return ret; } if (dev->has_touch_cap) { /* initialize touch_cap queue */ ret = vivid_create_queue(dev, &dev->vb_touch_cap_q, V4L2_BUF_TYPE_VIDEO_CAPTURE, 2, &vivid_touch_cap_qops); if (ret) return ret; } if (dev->has_fb) { /* Create framebuffer for testing output overlay */ ret = vivid_fb_init(dev); if (ret) return ret; } return 0; } static int vivid_create_devnodes(struct platform_device *pdev, struct vivid_dev *dev, int inst, v4l2_std_id tvnorms_cap, v4l2_std_id tvnorms_out, unsigned in_type_counter[4], unsigned out_type_counter[4]) { struct video_device *vfd; int ret; if (dev->has_vid_cap) { vfd = &dev->vid_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-vid-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vid_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_vid_cap_q; vfd->tvnorms = tvnorms_cap; /* * Provide a mutex to v4l2 core. It will be used to protect * all fops and v4l2 ioctls. 
*/ vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->vid_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_cap_pad); if (ret) return ret; #endif #ifdef CONFIG_VIDEO_VIVID_CEC if (in_type_counter[HDMI]) { ret = cec_register_adapter(dev->cec_rx_adap, &pdev->dev); if (ret < 0) { cec_delete_adapter(dev->cec_rx_adap); dev->cec_rx_adap = NULL; return ret; } cec_s_phys_addr(dev->cec_rx_adap, 0, false); v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI input\n", dev_name(&dev->cec_rx_adap->devnode.dev)); } #endif ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_vid_out) { #ifdef CONFIG_VIDEO_VIVID_CEC int i; #endif vfd = &dev->vid_out_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-vid-out", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vid_out_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_vid_out_q; vfd->tvnorms = tvnorms_out; /* * Provide a mutex to v4l2 core. It will be used to protect * all fops and v4l2 ioctls. */ vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->vid_out_pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_out_pad); if (ret) return ret; #endif #ifdef CONFIG_VIDEO_VIVID_CEC for (i = 0; i < dev->num_hdmi_outputs; i++) { ret = cec_register_adapter(dev->cec_tx_adap[i], &pdev->dev); if (ret < 0) { for (; i >= 0; i--) { cec_delete_adapter(dev->cec_tx_adap[i]); dev->cec_tx_adap[i] = NULL; } return ret; } v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n", dev_name(&dev->cec_tx_adap[i]->devnode.dev), i); } #endif ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_out_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_vbi_cap) { vfd = &dev->vbi_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-vbi-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vbi_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_vbi_cap_q; vfd->lock = &dev->mutex; vfd->tvnorms = tvnorms_cap; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->vbi_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_cap_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s, supports %s VBI\n", video_device_node_name(vfd), (dev->has_raw_vbi_cap && dev->has_sliced_vbi_cap) ? "raw and sliced" : (dev->has_raw_vbi_cap ? 
"raw" : "sliced")); } if (dev->has_vbi_out) { vfd = &dev->vbi_out_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-vbi-out", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vbi_out_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_vbi_out_q; vfd->lock = &dev->mutex; vfd->tvnorms = tvnorms_out; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->vbi_out_pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_out_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s, supports %s VBI\n", video_device_node_name(vfd), (dev->has_raw_vbi_out && dev->has_sliced_vbi_out) ? "raw and sliced" : (dev->has_raw_vbi_out ? "raw" : "sliced")); } if (dev->has_sdr_cap) { vfd = &dev->sdr_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-sdr-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->sdr_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_sdr_cap_q; vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->sdr_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->sdr_cap_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_radio_rx) { vfd = &dev->radio_rx_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-rad-rx", inst); vfd->fops = &vivid_radio_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->radio_rx_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_rx_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 receiver device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_radio_tx) { vfd = &dev->radio_tx_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-rad-tx", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_radio_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->radio_tx_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_tx_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 transmitter device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_meta_cap) { vfd = &dev->meta_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-meta-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->meta_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_meta_cap_q; vfd->lock = &dev->mutex; vfd->tvnorms = tvnorms_cap; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->meta_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->meta_cap_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_VIDEO, meta_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 metadata capture device registered as 
%s\n", video_device_node_name(vfd)); } if (dev->has_meta_out) { vfd = &dev->meta_out_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-meta-out", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->meta_out_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_meta_out_q; vfd->lock = &dev->mutex; vfd->tvnorms = tvnorms_out; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->meta_out_pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&vfd->entity, 1, &dev->meta_out_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_VIDEO, meta_out_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 metadata output device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_touch_cap) { vfd = &dev->touch_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-touch-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->touch_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_touch_cap_q; vfd->tvnorms = tvnorms_cap; vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->touch_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->touch_cap_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_TOUCH, touch_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 touch capture device registered as %s\n", video_device_node_name(vfd)); } #ifdef CONFIG_MEDIA_CONTROLLER /* Register the media device */ ret = media_device_register(&dev->mdev); if (ret) { dev_err(dev->mdev.dev, "media device register failed (err=%d)\n", ret); return ret; } #endif return 0; } static void update_hdmi_ctrls_work_handler(struct work_struct *work) { u64 skip_mask; u64 update_mask; spin_lock(&hdmi_output_skip_mask_lock); skip_mask = hdmi_to_output_menu_skip_mask; update_mask = hdmi_input_update_outputs_mask; hdmi_input_update_outputs_mask = 0; spin_unlock(&hdmi_output_skip_mask_lock); for (int i = 0; i < n_devs && vivid_devs[i]; i++) { if (update_mask & (1 << i)) vivid_update_connected_outputs(vivid_devs[i]); for (int j = 0; j < vivid_devs[i]->num_hdmi_inputs; j++) { struct v4l2_ctrl *c = vivid_devs[i]->ctrl_hdmi_to_output[j]; v4l2_ctrl_modify_range(c, c->minimum, c->maximum, skip_mask & ~(1ULL << c->cur.val), c->default_value); } } } static void update_svid_ctrls_work_handler(struct work_struct *work) { u64 skip_mask; spin_lock(&svid_output_skip_mask_lock); skip_mask = svid_to_output_menu_skip_mask; spin_unlock(&svid_output_skip_mask_lock); for (int i = 0; i < n_devs && vivid_devs[i]; i++) { for (int j = 0; j < vivid_devs[i]->num_svid_inputs; j++) { struct v4l2_ctrl *c = vivid_devs[i]->ctrl_svid_to_output[j]; v4l2_ctrl_modify_range(c, c->minimum, c->maximum, skip_mask & ~(1ULL << c->cur.val), c->default_value); } } } static int vivid_create_instance(struct platform_device *pdev, int inst) { static const struct v4l2_dv_timings def_dv_timings = V4L2_DV_BT_CEA_1280X720P60; unsigned in_type_counter[4] = { 0, 0, 0, 0 }; unsigned out_type_counter[4] = { 0, 0, 0, 0 }; int ccs_cap = ccs_cap_mode[inst]; int ccs_out = ccs_out_mode[inst]; bool has_tuner; bool has_modulator; struct vivid_dev *dev; unsigned node_type = node_types[inst]; v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0; int ret; int i; /* allocate main vivid state structure */ dev = 
kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->inst = inst; #ifdef CONFIG_MEDIA_CONTROLLER dev->v4l2_dev.mdev = &dev->mdev; /* Initialize media device */ strscpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model)); snprintf(dev->mdev.bus_info, sizeof(dev->mdev.bus_info), "platform:%s-%03d", VIVID_MODULE_NAME, inst); dev->mdev.dev = &pdev->dev; media_device_init(&dev->mdev); dev->mdev.ops = &vivid_media_ops; #endif /* register v4l2_device */ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s-%03d", VIVID_MODULE_NAME, inst); ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) { kfree(dev); return ret; } dev->v4l2_dev.release = vivid_dev_release; ret = vivid_detect_feature_set(dev, inst, node_type, &has_tuner, &has_modulator, &ccs_cap, &ccs_out, in_type_counter, out_type_counter); if (ret) goto free_dev; vivid_set_capabilities(dev); ret = -ENOMEM; /* initialize the test pattern generator */ tpg_init(&dev->tpg, 640, 360); if (tpg_alloc(&dev->tpg, array_size(MAX_WIDTH, MAX_ZOOM))) goto free_dev; dev->scaled_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM)); if (!dev->scaled_line) goto free_dev; dev->blended_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM)); if (!dev->blended_line) goto free_dev; /* load the edid */ dev->edid = vmalloc(array_size(256, 128)); if (!dev->edid) goto free_dev; ret = vivid_init_dv_timings(dev); if (ret < 0) goto free_dev; vivid_disable_unused_ioctls(dev, has_tuner, has_modulator, in_type_counter, out_type_counter); /* configure internal data */ dev->fmt_cap = &vivid_formats[0]; dev->fmt_out = &vivid_formats[0]; if (!dev->multiplanar) vivid_formats[0].data_offset[0] = 0; dev->webcam_size_idx = 1; dev->webcam_ival_idx = 3; tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc); dev->std_out = V4L2_STD_PAL; if (dev->input_type[0] == TV || dev->input_type[0] == SVID) tvnorms_cap = V4L2_STD_ALL; if (dev->output_type[0] == SVID) tvnorms_out = V4L2_STD_ALL; for (i = 0; i < MAX_INPUTS; i++) { dev->dv_timings_cap[i] = def_dv_timings; dev->std_cap[i] = V4L2_STD_PAL; } dev->dv_timings_out = def_dv_timings; dev->tv_freq = 2804 /* 175.25 * 16 */; dev->tv_audmode = V4L2_TUNER_MODE_STEREO; dev->tv_field_cap = V4L2_FIELD_INTERLACED; dev->tv_field_out = V4L2_FIELD_INTERLACED; dev->radio_rx_freq = 95000 * 16; dev->radio_rx_audmode = V4L2_TUNER_MODE_STEREO; if (dev->has_radio_tx) { dev->radio_tx_freq = 95500 * 16; dev->radio_rds_loop = false; } dev->radio_tx_subchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_RDS; dev->sdr_adc_freq = 300000; dev->sdr_fm_freq = 50000000; dev->sdr_pixelformat = V4L2_SDR_FMT_CU8; dev->sdr_buffersize = SDR_CAP_SAMPLES_PER_BUF * 2; dev->edid_max_blocks = dev->edid_blocks = 2; memcpy(dev->edid, vivid_hdmi_edid, sizeof(vivid_hdmi_edid)); dev->radio_rds_init_time = ktime_get(); INIT_WORK(&dev->update_hdmi_ctrl_work, update_hdmi_ctrls_work_handler); INIT_WORK(&dev->update_svid_ctrl_work, update_svid_ctrls_work_handler); for (int j = 0, k = 0; j < dev->num_inputs; ++j) if (dev->input_type[j] == HDMI) dev->hdmi_index_to_input_index[k++] = j; for (int j = 0, k = 0; j < dev->num_outputs; ++j) if (dev->output_type[j] == HDMI) { dev->output_to_iface_index[j] = k; dev->hdmi_index_to_output_index[k++] = j; } for (int j = 0, k = 0; j < dev->num_inputs; ++j) if (dev->input_type[j] == SVID) dev->svid_index_to_input_index[k++] = j; for (int j = 0, k = 0; j < dev->num_outputs; ++j) if (dev->output_type[j] == SVID) dev->output_to_iface_index[j] = k++; /* create all controls */ ret = vivid_create_controls(dev, ccs_cap == -1, ccs_out == -1, 
no_error_inj, in_type_counter[TV] || in_type_counter[SVID] || out_type_counter[SVID], in_type_counter[HDMI] || out_type_counter[HDMI]); if (ret) goto unreg_dev; /* enable/disable interface specific controls */ if (dev->num_inputs && dev->input_type[0] != HDMI) { v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, false); v4l2_ctrl_activate(dev->ctrl_dv_timings, false); } else if (dev->num_inputs && dev->input_type[0] == HDMI) { v4l2_ctrl_activate(dev->ctrl_std_signal_mode, false); v4l2_ctrl_activate(dev->ctrl_standard, false); } /* * update the capture and output formats to do a proper initial * configuration. */ vivid_update_format_cap(dev, false); vivid_update_format_out(dev); /* update touch configuration */ dev->timeperframe_tch_cap.numerator = 1; dev->timeperframe_tch_cap.denominator = 10; vivid_set_touch(dev, 0); /* initialize locks */ spin_lock_init(&dev->slock); mutex_init(&dev->mutex); /* init dma queues */ INIT_LIST_HEAD(&dev->vid_cap_active); INIT_LIST_HEAD(&dev->vid_out_active); INIT_LIST_HEAD(&dev->vbi_cap_active); INIT_LIST_HEAD(&dev->vbi_out_active); INIT_LIST_HEAD(&dev->sdr_cap_active); INIT_LIST_HEAD(&dev->meta_cap_active); INIT_LIST_HEAD(&dev->meta_out_active); INIT_LIST_HEAD(&dev->touch_cap_active); spin_lock_init(&dev->cec_xfers_slock); if (allocators[inst] == 1) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ret = vivid_create_queues(dev); if (ret) goto unreg_dev; #ifdef CONFIG_VIDEO_VIVID_CEC if (dev->has_vid_cap && in_type_counter[HDMI]) { struct cec_adapter *adap; adap = vivid_cec_alloc_adap(dev, 0, false); ret = PTR_ERR_OR_ZERO(adap); if (ret < 0) goto unreg_dev; dev->cec_rx_adap = adap; } if (dev->has_vid_out) { int j; for (i = j = 0; i < dev->num_outputs; i++) { struct cec_adapter *adap; if (dev->output_type[i] != HDMI) continue; adap = vivid_cec_alloc_adap(dev, j, true); ret = PTR_ERR_OR_ZERO(adap); if (ret < 0) { while (j--) cec_delete_adapter(dev->cec_tx_adap[j]); goto unreg_dev; } dev->cec_tx_adap[j++] = adap; } } if (dev->cec_rx_adap || dev->num_hdmi_outputs) { init_waitqueue_head(&dev->kthread_waitq_cec); dev->kthread_cec = kthread_run(vivid_cec_bus_thread, dev, "vivid_cec-%s", dev->v4l2_dev.name); if (IS_ERR(dev->kthread_cec)) { ret = PTR_ERR(dev->kthread_cec); dev->kthread_cec = NULL; v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); goto unreg_dev; } } #endif v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_out); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_cap); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_out); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_touch_cap); /* finally start creating the device nodes */ ret = vivid_create_devnodes(pdev, dev, inst, tvnorms_cap, tvnorms_out, in_type_counter, out_type_counter); if (ret) goto unreg_dev; /* Now that everything is fine, let's add it to device list */ vivid_devs[inst] = dev; return 0; unreg_dev: vb2_video_unregister_device(&dev->touch_cap_dev); vb2_video_unregister_device(&dev->meta_out_dev); vb2_video_unregister_device(&dev->meta_cap_dev); video_unregister_device(&dev->radio_tx_dev); video_unregister_device(&dev->radio_rx_dev); vb2_video_unregister_device(&dev->sdr_cap_dev); vb2_video_unregister_device(&dev->vbi_out_dev); vb2_video_unregister_device(&dev->vbi_cap_dev); 
vb2_video_unregister_device(&dev->vid_out_dev); vb2_video_unregister_device(&dev->vid_cap_dev); cec_unregister_adapter(dev->cec_rx_adap); for (i = 0; i < MAX_HDMI_OUTPUTS; i++) cec_unregister_adapter(dev->cec_tx_adap[i]); if (dev->kthread_cec) kthread_stop(dev->kthread_cec); free_dev: v4l2_device_put(&dev->v4l2_dev); return ret; } /* This routine allocates from 1 to n_devs virtual drivers. The real maximum number of virtual drivers will depend on how many drivers will succeed. This is limited to the maximum number of devices that videodev supports, which is equal to VIDEO_NUM_DEVICES. */ static int vivid_probe(struct platform_device *pdev) { const struct font_desc *font = find_font("VGA8x16"); int ret = 0, i; if (font == NULL) { pr_err("vivid: could not find font\n"); return -ENODEV; } tpg_set_font(font->data); n_devs = clamp_t(unsigned, n_devs, 1, VIVID_MAX_DEVS); for (i = 0; i < n_devs; i++) { ret = vivid_create_instance(pdev, i); if (ret) { /* If some instantiations succeeded, keep driver */ if (i) ret = 0; break; } } if (ret < 0) { pr_err("vivid: error %d while loading driver\n", ret); return ret; } /* n_devs will reflect the actual number of allocated devices */ n_devs = i; /* Determine qmenu items actually in use */ int hdmi_count = FIXED_MENU_ITEMS; int svid_count = FIXED_MENU_ITEMS; for (int i = 0; i < n_devs; i++) { struct vivid_dev *dev = vivid_devs[i]; if (!dev->has_vid_out) continue; for (int j = 0; j < dev->num_outputs && hdmi_count < MAX_MENU_ITEMS; ++j) { if (dev->output_type[j] == HDMI) { vivid_ctrl_hdmi_to_output_instance[hdmi_count] = vivid_devs[i]; vivid_ctrl_hdmi_to_output_index[hdmi_count++] = j; } } for (int j = 0; j < dev->num_outputs && svid_count < MAX_MENU_ITEMS; ++j) { if (dev->output_type[j] == SVID) { vivid_ctrl_svid_to_output_instance[svid_count] = vivid_devs[i]; vivid_ctrl_svid_to_output_index[svid_count++] = j; } } } hdmi_count = min(hdmi_count, MAX_MENU_ITEMS); svid_count = min(svid_count, MAX_MENU_ITEMS); for (int i = 0; i < n_devs; i++) { for (int j = 0; j < vivid_devs[i]->num_hdmi_inputs; j++) { struct v4l2_ctrl *c = vivid_devs[i]->ctrl_hdmi_to_output[j]; v4l2_ctrl_modify_range(c, c->minimum, hdmi_count - 1, 0, c->default_value); } for (int j = 0; j < vivid_devs[i]->num_svid_inputs; j++) { struct v4l2_ctrl *c = vivid_devs[i]->ctrl_svid_to_output[j]; v4l2_ctrl_modify_range(c, c->minimum, svid_count - 1, 0, c->default_value); } } return ret; } static void vivid_remove(struct platform_device *pdev) { struct vivid_dev *dev; unsigned int i, j; for (i = 0; i < n_devs; i++) { dev = vivid_devs[i]; if (!dev) continue; if (dev->disconnect_error) vivid_reconnect(dev); #ifdef CONFIG_MEDIA_CONTROLLER media_device_unregister(&dev->mdev); #endif if (dev->has_vid_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vid_cap_dev)); vb2_video_unregister_device(&dev->vid_cap_dev); } if (dev->has_vid_out) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vid_out_dev)); vb2_video_unregister_device(&dev->vid_out_dev); } if (dev->has_vbi_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vbi_cap_dev)); vb2_video_unregister_device(&dev->vbi_cap_dev); } if (dev->has_vbi_out) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vbi_out_dev)); vb2_video_unregister_device(&dev->vbi_out_dev); } if (dev->has_sdr_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->sdr_cap_dev)); vb2_video_unregister_device(&dev->sdr_cap_dev); } if 
(dev->has_radio_rx) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->radio_rx_dev)); video_unregister_device(&dev->radio_rx_dev); } if (dev->has_radio_tx) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->radio_tx_dev)); video_unregister_device(&dev->radio_tx_dev); } if (dev->has_fb) vivid_fb_deinit(dev); if (dev->has_meta_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->meta_cap_dev)); vb2_video_unregister_device(&dev->meta_cap_dev); } if (dev->has_meta_out) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->meta_out_dev)); vb2_video_unregister_device(&dev->meta_out_dev); } if (dev->has_touch_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->touch_cap_dev)); vb2_video_unregister_device(&dev->touch_cap_dev); } cec_unregister_adapter(dev->cec_rx_adap); for (j = 0; j < MAX_HDMI_OUTPUTS; j++) cec_unregister_adapter(dev->cec_tx_adap[j]); if (dev->kthread_cec) kthread_stop(dev->kthread_cec); v4l2_device_put(&dev->v4l2_dev); vivid_devs[i] = NULL; } } static void vivid_pdev_release(struct device *dev) { } static struct platform_device vivid_pdev = { .name = "vivid", .dev.release = vivid_pdev_release, }; static struct platform_driver vivid_pdrv = { .probe = vivid_probe, .remove = vivid_remove, .driver = { .name = "vivid", }, }; static int __init vivid_init(void) { int hdmi_count = FIXED_MENU_ITEMS; int svid_count = FIXED_MENU_ITEMS; int ret = -ENOMEM; unsigned int ndevs; /* Sanity check, prevent insane number of vivid instances */ if (n_devs > 64) n_devs = 64; ndevs = clamp_t(unsigned int, n_devs, 1, VIVID_MAX_DEVS); for (unsigned int i = 0; i < ndevs; i++) { if (!(node_types[i] & (1 << 8))) continue; unsigned int n_outputs = min(num_outputs[i], MAX_OUTPUTS); for (u8 j = 0, k = 0; j < n_outputs && hdmi_count < MAX_MENU_ITEMS && k < MAX_HDMI_OUTPUTS; ++j) { if (output_types[i] & BIT(j)) { vivid_ctrl_hdmi_to_output_strings[hdmi_count] = kmalloc(MAX_STRING_LENGTH, GFP_KERNEL); if (!vivid_ctrl_hdmi_to_output_strings[hdmi_count]) goto free_output_strings; snprintf(vivid_ctrl_hdmi_to_output_strings[hdmi_count], MAX_STRING_LENGTH, "Output HDMI %03d-%d", i & 0xff, k); k++; hdmi_count++; } } for (u8 j = 0, k = 0; j < n_outputs && svid_count < MAX_MENU_ITEMS; ++j) { if (!(output_types[i] & BIT(j))) { vivid_ctrl_svid_to_output_strings[svid_count] = kmalloc(MAX_STRING_LENGTH, GFP_KERNEL); if (!vivid_ctrl_svid_to_output_strings[svid_count]) goto free_output_strings; snprintf(vivid_ctrl_svid_to_output_strings[svid_count], MAX_STRING_LENGTH, "Output S-Video %03d-%d", i & 0xff, k); k++; svid_count++; } } } ret = platform_device_register(&vivid_pdev); if (ret) goto free_output_strings; ret = platform_driver_register(&vivid_pdrv); if (ret) goto unreg_device; /* Create the workqueues used to update the controls */ update_hdmi_ctrls_workqueue = create_workqueue("update_hdmi_ctrls_wq"); if (!update_hdmi_ctrls_workqueue) { ret = -ENOMEM; goto unreg_driver; } update_svid_ctrls_workqueue = create_workqueue("update_svid_ctrls_wq"); if (!update_svid_ctrls_workqueue) { ret = -ENOMEM; goto destroy_hdmi_wq; } return ret; destroy_hdmi_wq: destroy_workqueue(update_hdmi_ctrls_workqueue); unreg_driver: platform_driver_unregister(&vivid_pdrv); unreg_device: platform_device_unregister(&vivid_pdev); free_output_strings: for (int i = FIXED_MENU_ITEMS; i < MAX_MENU_ITEMS; i++) { kfree(vivid_ctrl_hdmi_to_output_strings[i]); kfree(vivid_ctrl_svid_to_output_strings[i]); } return ret; }
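A stand-alone sketch of the menu skip-mask trick used by update_hdmi_ctrls_work_handler() and update_svid_ctrls_work_handler() earlier in this file: a set bit hides the corresponding menu item, and the handler always clears the bit of the currently selected item so the active choice can never be hidden out from under the user. The values below are illustrative only, not taken from the driver.

#include <stdio.h>

int main(void)
{
	unsigned long long skip_mask = 0x06ULL;	/* hide menu items 1 and 2 */
	int cur = 2;				/* item 2 is currently selected */

	/* Never skip the item that is currently in use. */
	unsigned long long effective = skip_mask & ~(1ULL << cur);

	for (int item = 0; item < 4; item++)
		printf("item %d: %s\n", item,
		       (effective & (1ULL << item)) ? "skipped" : "shown");
	return 0;
}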
static void __exit vivid_exit(void) { for (int i = FIXED_MENU_ITEMS; i < MAX_MENU_ITEMS; i++) { kfree(vivid_ctrl_hdmi_to_output_strings[i]); kfree(vivid_ctrl_svid_to_output_strings[i]); } destroy_workqueue(update_svid_ctrls_workqueue); destroy_workqueue(update_hdmi_ctrls_workqueue); platform_driver_unregister(&vivid_pdrv); platform_device_unregister(&vivid_pdev); } module_init(vivid_init); module_exit(vivid_exit);
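vivid_init() above follows the usual goto-based unwind ladder: each successfully acquired resource gets a label, and a failure jumps to the label that releases exactly what was acquired so far, in reverse order (which is also why the unreg_driver label must call platform_driver_unregister(), the inverse of the call whose failure it cleans up after). A minimal user-space sketch of the pattern, with hypothetical resource names:

#include <stdio.h>
#include <stdlib.h>

static int demo_init(void)
{
	char *first = malloc(16);	/* first acquisition */
	char *second;

	if (!first)
		goto err;
	second = malloc(16);		/* second acquisition */
	if (!second)
		goto free_first;	/* undo only what already succeeded */

	printf("both resources acquired\n");
	free(second);
	free(first);
	return 0;

free_first:
	free(first);
err:
	return -1;
}

int main(void)
{
	return demo_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}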
// SPDX-License-Identifier: GPL-2.0 #include
<linux/memcontrol.h> #include <linux/rwsem.h> #include <linux/shrinker.h> #include <linux/rculist.h> #include <trace/events/vmscan.h> #include "internal.h" LIST_HEAD(shrinker_list); DEFINE_MUTEX(shrinker_mutex); #ifdef CONFIG_MEMCG static int shrinker_nr_max; static inline int shrinker_unit_size(int nr_items) { return (DIV_ROUND_UP(nr_items, SHRINKER_UNIT_BITS) * sizeof(struct shrinker_info_unit *)); } static inline void shrinker_unit_free(struct shrinker_info *info, int start) { struct shrinker_info_unit **unit; int nr, i; if (!info) return; unit = info->unit; nr = DIV_ROUND_UP(info->map_nr_max, SHRINKER_UNIT_BITS); for (i = start; i < nr; i++) { if (!unit[i]) break; kfree(unit[i]); unit[i] = NULL; } } static inline int shrinker_unit_alloc(struct shrinker_info *new, struct shrinker_info *old, int nid) { struct shrinker_info_unit *unit; int nr = DIV_ROUND_UP(new->map_nr_max, SHRINKER_UNIT_BITS); int start = old ? DIV_ROUND_UP(old->map_nr_max, SHRINKER_UNIT_BITS) : 0; int i; for (i = start; i < nr; i++) { unit = kzalloc_node(sizeof(*unit), GFP_KERNEL, nid); if (!unit) { shrinker_unit_free(new, start); return -ENOMEM; } new->unit[i] = unit; } return 0; } void free_shrinker_info(struct mem_cgroup *memcg) { struct mem_cgroup_per_node *pn; struct shrinker_info *info; int nid; for_each_node(nid) { pn = memcg->nodeinfo[nid]; info = rcu_dereference_protected(pn->shrinker_info, true); shrinker_unit_free(info, 0); kvfree(info); rcu_assign_pointer(pn->shrinker_info, NULL); } } int alloc_shrinker_info(struct mem_cgroup *memcg) { int nid, ret = 0; int array_size = 0; mutex_lock(&shrinker_mutex); array_size = shrinker_unit_size(shrinker_nr_max); for_each_node(nid) { struct shrinker_info *info = kvzalloc_node(sizeof(*info) + array_size, GFP_KERNEL, nid); if (!info) goto err; info->map_nr_max = shrinker_nr_max; if (shrinker_unit_alloc(info, NULL, nid)) { kvfree(info); goto err; } rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); } mutex_unlock(&shrinker_mutex); return ret; err: mutex_unlock(&shrinker_mutex); free_shrinker_info(memcg); return -ENOMEM; } static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, int nid) { return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, lockdep_is_held(&shrinker_mutex)); } static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size, int old_size, int new_nr_max) { struct shrinker_info *new, *old; struct mem_cgroup_per_node *pn; int nid; for_each_node(nid) { pn = memcg->nodeinfo[nid]; old = shrinker_info_protected(memcg, nid); /* Not yet online memcg */ if (!old) return 0; /* Already expanded this shrinker_info */ if (new_nr_max <= old->map_nr_max) continue; new = kvzalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid); if (!new) return -ENOMEM; new->map_nr_max = new_nr_max; memcpy(new->unit, old->unit, old_size); if (shrinker_unit_alloc(new, old, nid)) { kvfree(new); return -ENOMEM; } rcu_assign_pointer(pn->shrinker_info, new); kvfree_rcu(old, rcu); } return 0; } static int expand_shrinker_info(int new_id) { int ret = 0; int new_nr_max = round_up(new_id + 1, SHRINKER_UNIT_BITS); int new_size, old_size = 0; struct mem_cgroup *memcg; if (!root_mem_cgroup) goto out; lockdep_assert_held(&shrinker_mutex); new_size = shrinker_unit_size(new_nr_max); old_size = shrinker_unit_size(shrinker_nr_max); memcg = mem_cgroup_iter(NULL, NULL, NULL); do { ret = expand_one_shrinker_info(memcg, new_size, old_size, new_nr_max); if (ret) { mem_cgroup_iter_break(NULL, memcg); goto out; } } while ((memcg = mem_cgroup_iter(NULL, 
memcg, NULL)) != NULL); out: if (!ret) shrinker_nr_max = new_nr_max; return ret; } static inline int shrinker_id_to_index(int shrinker_id) { return shrinker_id / SHRINKER_UNIT_BITS; } static inline int shrinker_id_to_offset(int shrinker_id) { return shrinker_id % SHRINKER_UNIT_BITS; } static inline int calc_shrinker_id(int index, int offset) { return index * SHRINKER_UNIT_BITS + offset; } void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) { if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { struct shrinker_info *info; struct shrinker_info_unit *unit; rcu_read_lock(); info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) { /* only index into the unit array after the bounds check */ unit = info->unit[shrinker_id_to_index(shrinker_id)]; /* Pairs with smp mb in shrink_slab() */ smp_mb__before_atomic(); set_bit(shrinker_id_to_offset(shrinker_id), unit->map); } rcu_read_unlock(); } } static DEFINE_IDR(shrinker_idr); static int shrinker_memcg_alloc(struct shrinker *shrinker) { int id, ret = -ENOMEM; if (mem_cgroup_disabled()) return -ENOSYS; mutex_lock(&shrinker_mutex); id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL); if (id < 0) goto unlock; if (id >= shrinker_nr_max) { if (expand_shrinker_info(id)) { idr_remove(&shrinker_idr, id); goto unlock; } } shrinker->id = id; ret = 0; unlock: mutex_unlock(&shrinker_mutex); return ret; } static void shrinker_memcg_remove(struct shrinker *shrinker) { int id = shrinker->id; BUG_ON(id < 0); lockdep_assert_held(&shrinker_mutex); idr_remove(&shrinker_idr, id); } static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, struct mem_cgroup *memcg) { struct shrinker_info *info; struct shrinker_info_unit *unit; long nr_deferred; rcu_read_lock(); info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); unit = info->unit[shrinker_id_to_index(shrinker->id)]; nr_deferred = atomic_long_xchg(&unit->nr_deferred[shrinker_id_to_offset(shrinker->id)], 0); rcu_read_unlock(); return nr_deferred; } static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, struct mem_cgroup *memcg) { struct shrinker_info *info; struct shrinker_info_unit *unit; long nr_deferred; rcu_read_lock(); info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); unit = info->unit[shrinker_id_to_index(shrinker->id)]; nr_deferred = atomic_long_add_return(nr, &unit->nr_deferred[shrinker_id_to_offset(shrinker->id)]); rcu_read_unlock(); return nr_deferred; } void reparent_shrinker_deferred(struct mem_cgroup *memcg) { int nid, index, offset; long nr; struct mem_cgroup *parent; struct shrinker_info *child_info, *parent_info; struct shrinker_info_unit *child_unit, *parent_unit; parent = parent_mem_cgroup(memcg); if (!parent) parent = root_mem_cgroup; /* Prevent from concurrent shrinker_info expand */ mutex_lock(&shrinker_mutex); for_each_node(nid) { child_info = shrinker_info_protected(memcg, nid); parent_info = shrinker_info_protected(parent, nid); for (index = 0; index < shrinker_id_to_index(child_info->map_nr_max); index++) { child_unit = child_info->unit[index]; parent_unit = parent_info->unit[index]; for (offset = 0; offset < SHRINKER_UNIT_BITS; offset++) { nr = atomic_long_read(&child_unit->nr_deferred[offset]); atomic_long_add(nr, &parent_unit->nr_deferred[offset]); } } } mutex_unlock(&shrinker_mutex); } #else static int shrinker_memcg_alloc(struct shrinker *shrinker) { return -ENOSYS; } static void shrinker_memcg_remove(struct shrinker *shrinker) { } static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
struct mem_cgroup *memcg) { return 0; } static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, struct mem_cgroup *memcg) { return 0; } #endif /* CONFIG_MEMCG */ static long xchg_nr_deferred(struct shrinker *shrinker, struct shrink_control *sc) { int nid = sc->nid; if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) nid = 0; if (sc->memcg && (shrinker->flags & SHRINKER_MEMCG_AWARE)) return xchg_nr_deferred_memcg(nid, shrinker, sc->memcg); return atomic_long_xchg(&shrinker->nr_deferred[nid], 0); } static long add_nr_deferred(long nr, struct shrinker *shrinker, struct shrink_control *sc) { int nid = sc->nid; if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) nid = 0; if (sc->memcg && (shrinker->flags & SHRINKER_MEMCG_AWARE)) return add_nr_deferred_memcg(nr, nid, shrinker, sc->memcg); return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]); } #define SHRINK_BATCH 128 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, struct shrinker *shrinker, int priority) { unsigned long freed = 0; unsigned long long delta; long total_scan; long freeable; long nr; long new_nr; long batch_size = shrinker->batch ? shrinker->batch : SHRINK_BATCH; long scanned = 0, next_deferred; freeable = shrinker->count_objects(shrinker, shrinkctl); if (freeable == 0 || freeable == SHRINK_EMPTY) return freeable; /* * copy the current shrinker scan count into a local variable * and zero it so that other concurrent shrinker invocations * don't also do this scanning work. */ nr = xchg_nr_deferred(shrinker, shrinkctl); if (shrinker->seeks) { delta = freeable >> priority; delta *= 4; do_div(delta, shrinker->seeks); } else { /* * These objects don't require any IO to create. Trim * them aggressively under memory pressure to keep * them from causing refetches in the IO caches. */ delta = freeable / 2; } total_scan = nr >> priority; total_scan += delta; total_scan = min(total_scan, (2 * freeable)); trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, freeable, delta, total_scan, priority); /* * Normally, we should not scan less than batch_size objects in one * pass to avoid too frequent shrinker calls, but if the slab has less * than batch_size objects in total and we are really tight on memory, * we will try to reclaim all available objects, otherwise we can end * up failing allocations although there are plenty of reclaimable * objects spread over several slabs with usage less than the * batch_size. * * We detect the "tight on memory" situations by looking at the total * number of objects we want to scan (total_scan). If it is greater * than the total number of objects on slab (freeable), we must be * scanning at high prio and therefore should try to reclaim as much as * possible. */ while (total_scan >= batch_size || total_scan >= freeable) { unsigned long ret; unsigned long nr_to_scan = min(batch_size, total_scan); shrinkctl->nr_to_scan = nr_to_scan; shrinkctl->nr_scanned = nr_to_scan; ret = shrinker->scan_objects(shrinker, shrinkctl); if (ret == SHRINK_STOP) break; freed += ret; count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned); total_scan -= shrinkctl->nr_scanned; scanned += shrinkctl->nr_scanned; cond_resched(); } /* * The deferred work is increased by any new work (delta) that wasn't * done, decreased by old deferred work that was done now. * * And it is capped to two times of the freeable items. 
*/ next_deferred = max_t(long, (nr + delta - scanned), 0); next_deferred = min(next_deferred, (2 * freeable)); /* * move the unused scan count back into the shrinker in a * manner that handles concurrent updates. */ new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl); trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan); return freed; } #ifdef CONFIG_MEMCG static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority) { struct shrinker_info *info; unsigned long ret, freed = 0; int offset, index = 0; if (!mem_cgroup_online(memcg)) return 0; /* * lockless algorithm of memcg shrink. * * The shrinker_info may be freed asynchronously via RCU in the * expand_one_shrinker_info(), so the rcu_read_lock() needs to be used * to ensure the existence of the shrinker_info. * * The shrinker_info_unit is never freed unless its corresponding memcg * is destroyed. Here we already hold the refcount of memcg, so the * memcg will not be destroyed, and of course shrinker_info_unit will * not be freed. * * So in the memcg shrink: * step 1: use rcu_read_lock() to guarantee existence of the * shrinker_info. * step 2: after getting shrinker_info_unit we can safely release the * RCU lock. * step 3: traverse the bitmap and calculate shrinker_id * step 4: use rcu_read_lock() to guarantee existence of the shrinker. * step 5: use shrinker_id to find the shrinker, then use * shrinker_try_get() to guarantee existence of the shrinker, * then we can release the RCU lock to do do_shrink_slab() that * may sleep. * step 6: do shrinker_put() paired with step 5 to put the refcount, * if the refcount reaches 0, then wake up the waiter in * shrinker_free() by calling complete(). * Note: this differs from the global shrink: we don't * need to acquire the RCU lock to guarantee existence of * the shrinker, because we don't need to use this * shrinker to traverse the next shrinker in the bitmap. * step 7: we have already exited the read-side of the RCU critical * section before calling do_shrink_slab(), the shrinker_info * may be released in expand_one_shrinker_info(), so go back to * step 1 to reacquire the shrinker_info. */ again: rcu_read_lock(); info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); if (unlikely(!info)) goto unlock; if (index < shrinker_id_to_index(info->map_nr_max)) { struct shrinker_info_unit *unit; unit = info->unit[index]; rcu_read_unlock(); for_each_set_bit(offset, unit->map, SHRINKER_UNIT_BITS) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, .memcg = memcg, }; struct shrinker *shrinker; int shrinker_id = calc_shrinker_id(index, offset); rcu_read_lock(); shrinker = idr_find(&shrinker_idr, shrinker_id); if (unlikely(!shrinker || !shrinker_try_get(shrinker))) { clear_bit(offset, unit->map); rcu_read_unlock(); continue; } rcu_read_unlock(); /* Call non-slab shrinkers even though kmem is disabled */ if (!memcg_kmem_online() && !(shrinker->flags & SHRINKER_NONSLAB)) continue; ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) { clear_bit(offset, unit->map); /* * After the shrinker reported that it had no objects to * free, but before we cleared the corresponding bit in * the memcg shrinker map, a new object might have been * added. To make sure we have the bit set in this * case, we invoke the shrinker one more time and reset * the bit if it reports that it is not empty anymore.
* The memory barrier here pairs with the barrier in * set_shrinker_bit(): * * list_lru_add() shrink_slab_memcg() * list_add_tail() clear_bit() * <MB> <MB> * set_bit() do_shrink_slab() */ smp_mb__after_atomic(); ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) ret = 0; else set_shrinker_bit(memcg, nid, shrinker_id); } freed += ret; shrinker_put(shrinker); } index++; goto again; } unlock: rcu_read_unlock(); return freed; } #else /* !CONFIG_MEMCG */ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority) { return 0; } #endif /* CONFIG_MEMCG */ /** * shrink_slab - shrink slab caches * @gfp_mask: allocation context * @nid: node whose slab caches to target * @memcg: memory cgroup whose slab caches to target * @priority: the reclaim priority * * Call the shrink functions to age shrinkable caches. * * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set, * unaware shrinkers will receive a node id of 0 instead. * * @memcg specifies the memory cgroup to target. Unaware shrinkers * are called only if it is the root cgroup. * * @priority is sc->priority, we take the number of objects and >> by priority * in order to get the scan target. * * Returns the number of reclaimed slab objects. */ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority) { unsigned long ret, freed = 0; struct shrinker *shrinker; /* * The root memcg might be allocated even though memcg is disabled * via "cgroup_disable=memory" boot parameter. This could make * mem_cgroup_is_root() return false, then just run memcg slab * shrink, but skip global shrink. This may result in premature * oom. */ if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg)) return shrink_slab_memcg(gfp_mask, nid, memcg, priority); /* * lockless algorithm of global shrink. * * In the unregistration step, the shrinker will be freed asynchronously * via RCU after its refcount reaches 0. So both rcu_read_lock() and * shrinker_try_get() can be used to ensure the existence of the shrinker. * * So in the global shrink: * step 1: use rcu_read_lock() to guarantee existence of the shrinker * and the validity of the shrinker_list walk. * step 2: use shrinker_try_get() to try to get the refcount, if successful, * then the existence of the shrinker can also be guaranteed, * so we can release the RCU lock to do do_shrink_slab() that * may sleep. * step 3: *MUST* reacquire the RCU lock before calling shrinker_put(), * which ensures that neither this shrinker nor the next shrinker * will be freed in the next traversal operation. * step 4: do shrinker_put() paired with step 2 to put the refcount, * if the refcount reaches 0, then wake up the waiter in * shrinker_free() by calling complete(). */ rcu_read_lock(); list_for_each_entry_rcu(shrinker, &shrinker_list, list) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, .memcg = memcg, }; if (!shrinker_try_get(shrinker)) continue; rcu_read_unlock(); ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) ret = 0; freed += ret; rcu_read_lock(); shrinker_put(shrinker); } rcu_read_unlock(); cond_resched(); return freed; } struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...)
{ struct shrinker *shrinker; unsigned int size; va_list ap; int err; shrinker = kzalloc(sizeof(struct shrinker), GFP_KERNEL); if (!shrinker) return NULL; va_start(ap, fmt); err = shrinker_debugfs_name_alloc(shrinker, fmt, ap); va_end(ap); if (err) goto err_name; shrinker->flags = flags | SHRINKER_ALLOCATED; shrinker->seeks = DEFAULT_SEEKS; if (flags & SHRINKER_MEMCG_AWARE) { err = shrinker_memcg_alloc(shrinker); if (err == -ENOSYS) { /* Memcg is not supported, fallback to non-memcg-aware shrinker. */ shrinker->flags &= ~SHRINKER_MEMCG_AWARE; goto non_memcg; } if (err) goto err_flags; return shrinker; } non_memcg: /* * The nr_deferred is available on per memcg level for memcg aware * shrinkers, so only allocate nr_deferred in the following cases: * - non-memcg-aware shrinkers * - !CONFIG_MEMCG * - memcg is disabled by kernel command line */ size = sizeof(*shrinker->nr_deferred); if (flags & SHRINKER_NUMA_AWARE) size *= nr_node_ids; shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); if (!shrinker->nr_deferred) goto err_flags; return shrinker; err_flags: shrinker_debugfs_name_free(shrinker); err_name: kfree(shrinker); return NULL; } EXPORT_SYMBOL_GPL(shrinker_alloc); void shrinker_register(struct shrinker *shrinker) { if (unlikely(!(shrinker->flags & SHRINKER_ALLOCATED))) { pr_warn("Must use shrinker_alloc() to dynamically allocate the shrinker"); return; } mutex_lock(&shrinker_mutex); list_add_tail_rcu(&shrinker->list, &shrinker_list); shrinker->flags |= SHRINKER_REGISTERED; shrinker_debugfs_add(shrinker); mutex_unlock(&shrinker_mutex); init_completion(&shrinker->done); /* * Now the shrinker is fully set up, take the first reference to it to * indicate that lookup operations are now allowed to use it via * shrinker_try_get(). */ refcount_set(&shrinker->refcount, 1); } EXPORT_SYMBOL_GPL(shrinker_register); static void shrinker_free_rcu_cb(struct rcu_head *head) { struct shrinker *shrinker = container_of(head, struct shrinker, rcu); kfree(shrinker->nr_deferred); kfree(shrinker); } void shrinker_free(struct shrinker *shrinker) { struct dentry *debugfs_entry = NULL; int debugfs_id; if (!shrinker) return; if (shrinker->flags & SHRINKER_REGISTERED) { /* drop the initial refcount */ shrinker_put(shrinker); /* * Wait for all lookups of the shrinker to complete, after that, * no shrinker is running or will run again, then we can safely * free it asynchronously via RCU and safely free the structure * where the shrinker is located, such as super_block etc. */ wait_for_completion(&shrinker->done); } mutex_lock(&shrinker_mutex); if (shrinker->flags & SHRINKER_REGISTERED) { /* * Now we can safely remove it from the shrinker_list and then * free it. */ list_del_rcu(&shrinker->list); debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id); shrinker->flags &= ~SHRINKER_REGISTERED; } shrinker_debugfs_name_free(shrinker); if (shrinker->flags & SHRINKER_MEMCG_AWARE) shrinker_memcg_remove(shrinker); mutex_unlock(&shrinker_mutex); if (debugfs_entry) shrinker_debugfs_remove(debugfs_entry, debugfs_id); call_rcu(&shrinker->rcu, shrinker_free_rcu_cb); } EXPORT_SYMBOL_GPL(shrinker_free);
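The id <-> (unit index, bit offset) arithmetic behind shrinker_id_to_index(), shrinker_id_to_offset() and calc_shrinker_id() above can be checked in isolation. A small user-space sketch; SHRINKER_UNIT_BITS is assumed to be 64 here purely for illustration, not the kernel's definition:

#include <assert.h>
#include <stdio.h>

#define SHRINKER_UNIT_BITS 64	/* illustrative value */

static int id_to_index(int id)  { return id / SHRINKER_UNIT_BITS; }
static int id_to_offset(int id) { return id % SHRINKER_UNIT_BITS; }
static int calc_id(int index, int offset) { return index * SHRINKER_UNIT_BITS + offset; }

int main(void)
{
	/* Round trip: splitting an id and recombining it is the identity. */
	for (int id = 0; id < 4 * SHRINKER_UNIT_BITS; id++)
		assert(calc_id(id_to_index(id), id_to_offset(id)) == id);

	/* With 64-bit units, id 130 lives in unit 2 at bit 2. */
	printf("id 130 -> unit %d, bit %d\n", id_to_index(130), id_to_offset(130));
	return 0;
}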
// SPDX-License-Identifier: GPL-2.0-only /* * HID driver for Aureal Cy se W-01RN USB_V3.1 devices * * Copyright (c) 2010 Franco Catrin <fcatrin@gmail.com> * Copyright (c) 2010 Ben Cropley <bcropley@internode.on.net> * * Based on HID sunplus driver by * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" static const __u8 *aureal_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { dev_info(&hdev->dev, "fixing Aureal Cy se W-01RN USB_V3.1 report descriptor.\n"); rdesc[53] = 0x65; } return rdesc; } static const struct hid_device_id aureal_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, { } }; MODULE_DEVICE_TABLE(hid, aureal_devices); static struct hid_driver aureal_driver = { .name = "aureal", .id_table = aureal_devices, .report_fixup = aureal_report_fixup, }; module_hid_driver(aureal_driver); MODULE_DESCRIPTION("HID driver for Aureal Cy se W-01RN USB_V3.1 devices"); MODULE_LICENSE("GPL");
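The fixup above patches a single byte of the report descriptor: offset 52 holds the one-byte Logical Maximum item tag (0x25), and the device ships a bogus value of 0x01 at offset 53, which the driver bumps to 0x65 (101). A user-space sketch of the same check on a fabricated buffer (not a real descriptor):

#include <stdio.h>

static void fixup(unsigned char *rdesc, unsigned int rsize)
{
	/* Same condition as aureal_report_fixup() above. */
	if (rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01)
		rdesc[53] = 0x65;	/* Logical Maximum: 1 -> 101 */
}

int main(void)
{
	unsigned char rdesc[54] = { 0 };

	rdesc[52] = 0x25;	/* Logical Maximum (1-byte) item tag */
	rdesc[53] = 0x01;	/* bogus maximum as shipped by the device */
	fixup(rdesc, sizeof(rdesc));
	printf("patched Logical Maximum: 0x%02x\n", rdesc[53]);
	return 0;
}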
// SPDX-License-Identifier: GPL-2.0-only /* * Lock-less NULL terminated single linked list * * The basic atomic operation of this list is cmpxchg on long. On * architectures that don't have NMI-safe cmpxchg implementation, the * list can NOT be used in NMI handlers. So code that uses the list in * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2010,2011 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/llist.h> /** * llist_del_first - delete the first entry of lock-less list * @head: the head for your lock-less list * * If list is empty, return NULL, otherwise, return the first entry * deleted, this is the newest added one. * * Only one llist_del_first user can be used simultaneously with * multiple llist_add users without lock. Because otherwise * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add, * llist_add) sequence in another user may change @head->first->next, * but keep @head->first. If multiple consumers are needed, please * use llist_del_all or use lock between consumers. */ struct llist_node *llist_del_first(struct llist_head *head) { struct llist_node *entry, *next; entry = smp_load_acquire(&head->first); do { if (entry == NULL) return NULL; next = READ_ONCE(entry->next); } while (!try_cmpxchg(&head->first, &entry, next)); return entry; } EXPORT_SYMBOL_GPL(llist_del_first); /** * llist_del_first_this - delete given entry of lock-less list if it is first * @head: the head for your lock-less list * @this: a list entry. * * If head of the list is given entry, delete and return %true else * return %false. * * Multiple callers can safely call this concurrently with multiple * llist_add() callers, providing all the callers offer a different @this. */ bool llist_del_first_this(struct llist_head *head, struct llist_node *this) { struct llist_node *entry, *next; /* acquire ensures ordering wrt try_cmpxchg() in llist_del_first() */ entry = smp_load_acquire(&head->first); do { if (entry != this) return false; next = READ_ONCE(entry->next); } while (!try_cmpxchg(&head->first, &entry, next)); return true; } EXPORT_SYMBOL_GPL(llist_del_first_this); /** * llist_reverse_order - reverse order of a llist chain * @head: first item of the list to be reversed * * Reverse the order of a chain of llist entries and return the * new first entry. */ struct llist_node *llist_reverse_order(struct llist_node *head) { struct llist_node *new_head = NULL; while (head) { struct llist_node *tmp = head; head = head->next; tmp->next = new_head; new_head = tmp; } return new_head; } EXPORT_SYMBOL_GPL(llist_reverse_order);
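A user-space analog of the llist_del_first() cmpxchg loop above, using C11 atomics in place of the kernel's smp_load_acquire()/try_cmpxchg(). As the kernel-doc above requires, only a single consumer may call the pop side concurrently with multiple producers:

#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; int val; };

static _Atomic(struct node *) head;

/* llist_add() analog: push a node as the new first entry. */
static void push(struct node *n)
{
	struct node *first = atomic_load(&head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&head, &first, n));
}

/* llist_del_first() analog: pop the newest entry, NULL if empty. */
static struct node *pop_first(void)
{
	struct node *entry = atomic_load_explicit(&head, memory_order_acquire);
	struct node *next;

	do {
		if (!entry)
			return NULL;
		next = entry->next;
	} while (!atomic_compare_exchange_weak(&head, &entry, next));
	return entry;
}

int main(void)
{
	struct node a = { .val = 1 }, b = { .val = 2 };

	push(&a);
	push(&b);				/* b becomes the first (newest) entry */
	printf("popped %d\n", pop_first()->val);	/* prints 2 */
	return 0;
}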
// SPDX-License-Identifier: GPL-2.0-only /* * IEEE 802.15.4 scanning management * * Copyright (C) 2021 Qorvo US, Inc * Authors: * - David Girault <david.girault@qorvo.com> * - Miquel Raynal <miquel.raynal@bootlin.com> */ #include <linux/module.h> #include <linux/rtnetlink.h> #include <net/mac802154.h> #include "ieee802154_i.h" #include "driver-ops.h" #include "../ieee802154/nl802154.h" #define IEEE802154_BEACON_MHR_SZ 13 #define IEEE802154_BEACON_PL_SZ 4 #define IEEE802154_MAC_CMD_MHR_SZ 23 #define IEEE802154_MAC_CMD_PL_SZ 1 #define IEEE802154_BEACON_SKB_SZ (IEEE802154_BEACON_MHR_SZ + \ IEEE802154_BEACON_PL_SZ) #define IEEE802154_MAC_CMD_SKB_SZ (IEEE802154_MAC_CMD_MHR_SZ + \ IEEE802154_MAC_CMD_PL_SZ) /* mac802154_scan_cleanup_locked() must be called upon scan completion or abort. * - Completions are asynchronous, not locked by the rtnl and decided by the * scan worker. * - Aborts are decided by userspace, and locked by the rtnl. * * Concurrent modifications to the PHY, the interfaces or the hardware are in * general prevented by the rtnl. So in most cases we don't need additional * protection. * * However, the scan worker gets triggered without anybody noticing and thus we * must ensure the presence of the devices as well as data consistency: * - The sub-interface and the device driver module both get their reference * counters incremented whenever we start a scan, so they cannot disappear * during operation. * - Data consistency is achieved by the use of rcu protected pointers. */ static int mac802154_scan_cleanup_locked(struct ieee802154_local *local, struct ieee802154_sub_if_data *sdata, bool aborted) { struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct wpan_phy *wpan_phy = local->phy; struct cfg802154_scan_request *request; u8 arg; /* Prevent any further use of the scan request */ clear_bit(IEEE802154_IS_SCANNING, &local->ongoing); cancel_delayed_work(&local->scan_work); request = rcu_replace_pointer(local->scan_req, NULL, 1); if (!request) return 0; kvfree_rcu_mightsleep(request); /* Advertise first, while we know the devices cannot be removed */ if (aborted) arg = NL802154_SCAN_DONE_REASON_ABORTED; else arg = NL802154_SCAN_DONE_REASON_FINISHED; nl802154_scan_done(wpan_phy, wpan_dev, arg); /* Cleanup software stack */ ieee802154_mlme_op_post(local); /* Set the hardware back in its original state */ drv_set_channel(local, wpan_phy->current_page, wpan_phy->current_channel); ieee802154_configure_durations(wpan_phy, wpan_phy->current_page, wpan_phy->current_channel); drv_stop(local); synchronize_net(); sdata->required_filtering = sdata->iface_default_filtering; drv_start(local, sdata->required_filtering, &local->addr_filt); return 0; } int mac802154_abort_scan_locked(struct ieee802154_local *local, struct ieee802154_sub_if_data *sdata) { ASSERT_RTNL(); if (!mac802154_is_scanning(local)) return -ESRCH; return mac802154_scan_cleanup_locked(local, sdata, true); } static unsigned int mac802154_scan_get_channel_time(u8 duration_order, u8 symbol_duration) { u64 base_super_frame_duration = (u64)symbol_duration * IEEE802154_SUPERFRAME_PERIOD * IEEE802154_SLOT_PERIOD; return usecs_to_jiffies(base_super_frame_duration * (BIT(duration_order) + 1)); } static void mac802154_flush_queued_beacons(struct ieee802154_local *local) { struct cfg802154_mac_pkt *mac_pkt, *tmp; list_for_each_entry_safe(mac_pkt, tmp, &local->rx_beacon_list, node) { list_del(&mac_pkt->node); kfree_skb(mac_pkt->skb); kfree(mac_pkt); } } static void mac802154_scan_get_next_channel(struct ieee802154_local *local, struct
cfg802154_scan_request *scan_req, u8 *channel) { (*channel)++; *channel = find_next_bit((const unsigned long *)&scan_req->channels, IEEE802154_MAX_CHANNEL + 1, *channel); } static int mac802154_scan_find_next_chan(struct ieee802154_local *local, struct cfg802154_scan_request *scan_req, u8 page, u8 *channel) { mac802154_scan_get_next_channel(local, scan_req, channel); if (*channel > IEEE802154_MAX_CHANNEL) return -EINVAL; return 0; } static int mac802154_scan_prepare_beacon_req(struct ieee802154_local *local) { memset(&local->scan_beacon_req, 0, sizeof(local->scan_beacon_req)); local->scan_beacon_req.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; local->scan_beacon_req.mhr.fc.dest_addr_mode = IEEE802154_SHORT_ADDRESSING; local->scan_beacon_req.mhr.fc.version = IEEE802154_2003_STD; local->scan_beacon_req.mhr.fc.source_addr_mode = IEEE802154_NO_ADDRESSING; local->scan_beacon_req.mhr.dest.mode = IEEE802154_ADDR_SHORT; local->scan_beacon_req.mhr.dest.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); local->scan_beacon_req.mhr.dest.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); local->scan_beacon_req.mac_pl.cmd_id = IEEE802154_CMD_BEACON_REQ; return 0; } static int mac802154_transmit_beacon_req(struct ieee802154_local *local, struct ieee802154_sub_if_data *sdata) { struct sk_buff *skb; int ret; skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ, GFP_KERNEL); if (!skb) return -ENOBUFS; skb->dev = sdata->dev; ret = ieee802154_mac_cmd_push(skb, &local->scan_beacon_req, NULL, 0); if (ret) { kfree_skb(skb); return ret; } return ieee802154_mlme_tx(local, sdata, skb); } void mac802154_scan_worker(struct work_struct *work) { struct ieee802154_local *local = container_of(work, struct ieee802154_local, scan_work.work); struct cfg802154_scan_request *scan_req; enum nl802154_scan_types scan_req_type; struct ieee802154_sub_if_data *sdata; unsigned int scan_duration = 0; struct wpan_phy *wpan_phy; u8 scan_req_duration; u8 page, channel; int ret; /* Ensure the device receiver is turned off when changing channels * because there is no atomic way to change the channel and know on * which one a beacon might have been received. 
*/ drv_stop(local); synchronize_net(); mac802154_flush_queued_beacons(local); rcu_read_lock(); scan_req = rcu_dereference(local->scan_req); if (unlikely(!scan_req)) { rcu_read_unlock(); return; } sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(scan_req->wpan_dev); /* Wait an arbitrary amount of time in case we cannot use the device */ if (local->suspended || !ieee802154_sdata_running(sdata)) { rcu_read_unlock(); queue_delayed_work(local->mac_wq, &local->scan_work, msecs_to_jiffies(1000)); return; } wpan_phy = scan_req->wpan_phy; scan_req_type = scan_req->type; scan_req_duration = scan_req->duration; /* Look for the next valid chan */ page = local->scan_page; channel = local->scan_channel; do { ret = mac802154_scan_find_next_chan(local, scan_req, page, &channel); if (ret) { rcu_read_unlock(); goto end_scan; } } while (!ieee802154_chan_is_valid(scan_req->wpan_phy, page, channel)); rcu_read_unlock(); /* Bypass the stack on purpose when changing the channel */ rtnl_lock(); ret = drv_set_channel(local, page, channel); rtnl_unlock(); if (ret) { dev_err(&sdata->dev->dev, "Channel change failure during scan, aborting (%d)\n", ret); goto end_scan; } local->scan_page = page; local->scan_channel = channel; rtnl_lock(); ret = drv_start(local, IEEE802154_FILTERING_3_SCAN, &local->addr_filt); rtnl_unlock(); if (ret) { dev_err(&sdata->dev->dev, "Restarting failure after channel change, aborting (%d)\n", ret); goto end_scan; } if (scan_req_type == NL802154_SCAN_ACTIVE) { ret = mac802154_transmit_beacon_req(local, sdata); if (ret) dev_err(&sdata->dev->dev, "Error when transmitting beacon request (%d)\n", ret); } ieee802154_configure_durations(wpan_phy, page, channel); scan_duration = mac802154_scan_get_channel_time(scan_req_duration, wpan_phy->symbol_duration); dev_dbg(&sdata->dev->dev, "Scan page %u channel %u for %ums\n", page, channel, jiffies_to_msecs(scan_duration)); queue_delayed_work(local->mac_wq, &local->scan_work, scan_duration); return; end_scan: rtnl_lock(); mac802154_scan_cleanup_locked(local, sdata, false); rtnl_unlock(); } int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata, struct cfg802154_scan_request *request) { struct ieee802154_local *local = sdata->local; ASSERT_RTNL(); if (mac802154_is_scanning(local)) return -EBUSY; if (request->type != NL802154_SCAN_PASSIVE && request->type != NL802154_SCAN_ACTIVE) return -EOPNOTSUPP; /* Store scanning parameters */ rcu_assign_pointer(local->scan_req, request); /* Software scanning requires to set promiscuous mode, so we need to * pause the Tx queue during the entire operation. 
*/ ieee802154_mlme_op_pre(local); sdata->required_filtering = IEEE802154_FILTERING_3_SCAN; local->scan_page = request->page; local->scan_channel = -1; set_bit(IEEE802154_IS_SCANNING, &local->ongoing); if (request->type == NL802154_SCAN_ACTIVE) mac802154_scan_prepare_beacon_req(local); nl802154_scan_started(request->wpan_phy, request->wpan_dev); queue_delayed_work(local->mac_wq, &local->scan_work, 0); return 0; } int mac802154_process_beacon(struct ieee802154_local *local, struct sk_buff *skb, u8 page, u8 channel) { struct ieee802154_beacon_hdr *bh = (void *)skb->data; struct ieee802154_addr *src = &mac_cb(skb)->source; struct cfg802154_scan_request *scan_req; struct ieee802154_coord_desc desc; if (skb->len != sizeof(*bh)) return -EINVAL; if (unlikely(src->mode == IEEE802154_ADDR_NONE)) return -EINVAL; dev_dbg(&skb->dev->dev, "BEACON received on page %u channel %u\n", page, channel); memcpy(&desc.addr, src, sizeof(desc.addr)); desc.page = page; desc.channel = channel; desc.link_quality = mac_cb(skb)->lqi; desc.superframe_spec = get_unaligned_le16(skb->data); desc.gts_permit = bh->gts_permit; trace_802154_scan_event(&desc); rcu_read_lock(); scan_req = rcu_dereference(local->scan_req); if (likely(scan_req)) nl802154_scan_event(scan_req->wpan_phy, scan_req->wpan_dev, &desc); rcu_read_unlock(); return 0; } static int mac802154_transmit_beacon(struct ieee802154_local *local, struct wpan_dev *wpan_dev) { struct cfg802154_beacon_request *beacon_req; struct ieee802154_sub_if_data *sdata; struct sk_buff *skb; int ret; /* Update the sequence number */ local->beacon.mhr.seq = atomic_inc_return(&wpan_dev->bsn) & 0xFF; skb = alloc_skb(IEEE802154_BEACON_SKB_SZ, GFP_KERNEL); if (!skb) return -ENOBUFS; rcu_read_lock(); beacon_req = rcu_dereference(local->beacon_req); if (unlikely(!beacon_req)) { rcu_read_unlock(); kfree_skb(skb); return -EINVAL; } sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(beacon_req->wpan_dev); skb->dev = sdata->dev; rcu_read_unlock(); ret = ieee802154_beacon_push(skb, &local->beacon); if (ret) { kfree_skb(skb); return ret; } /* Using the MLME transmission helper for sending beacons is a bit * overkill because we do not really care about the final outcome. * * Even so, going through the whole net stack with a regular * dev_queue_xmit() is not relevant either because we want beacons to be * sent "now" rather than go through the whole net stack scheduling * (qdisc & co). * * Finally, using ieee802154_subif_start_xmit() would only be an option * if we had a generic transmit helper which would acquire the * HARD_TX_LOCK() to prevent buffer handling conflicts with regular * packets. * * So for now we keep it simple and send beacons with our MLME helper, * even if it stops the ieee802154 queue entirely during these * transmissions, which anyway does not have a huge impact on * performance given the current design of the stack.
*/ return ieee802154_mlme_tx(local, sdata, skb); } void mac802154_beacon_worker(struct work_struct *work) { struct ieee802154_local *local = container_of(work, struct ieee802154_local, beacon_work.work); struct cfg802154_beacon_request *beacon_req; struct ieee802154_sub_if_data *sdata; struct wpan_dev *wpan_dev; u8 interval; int ret; rcu_read_lock(); beacon_req = rcu_dereference(local->beacon_req); if (unlikely(!beacon_req)) { rcu_read_unlock(); return; } sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(beacon_req->wpan_dev); /* Wait an arbitrary amount of time in case we cannot use the device */ if (local->suspended || !ieee802154_sdata_running(sdata)) { rcu_read_unlock(); queue_delayed_work(local->mac_wq, &local->beacon_work, msecs_to_jiffies(1000)); return; } wpan_dev = beacon_req->wpan_dev; interval = beacon_req->interval; rcu_read_unlock(); dev_dbg(&sdata->dev->dev, "Sending beacon\n"); ret = mac802154_transmit_beacon(local, wpan_dev); if (ret) dev_err(&sdata->dev->dev, "Beacon could not be transmitted (%d)\n", ret); if (interval < IEEE802154_ACTIVE_SCAN_DURATION) queue_delayed_work(local->mac_wq, &local->beacon_work, local->beacon_interval); } int mac802154_stop_beacons_locked(struct ieee802154_local *local, struct ieee802154_sub_if_data *sdata) { struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct cfg802154_beacon_request *request; ASSERT_RTNL(); if (!mac802154_is_beaconing(local)) return -ESRCH; clear_bit(IEEE802154_IS_BEACONING, &local->ongoing); cancel_delayed_work(&local->beacon_work); request = rcu_replace_pointer(local->beacon_req, NULL, 1); if (!request) return 0; kvfree_rcu_mightsleep(request); nl802154_beaconing_done(wpan_dev); return 0; } int mac802154_send_beacons_locked(struct ieee802154_sub_if_data *sdata, struct cfg802154_beacon_request *request) { struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; ASSERT_RTNL(); if (mac802154_is_beaconing(local)) mac802154_stop_beacons_locked(local, sdata); /* Store beaconing parameters */ rcu_assign_pointer(local->beacon_req, request); set_bit(IEEE802154_IS_BEACONING, &local->ongoing); memset(&local->beacon, 0, sizeof(local->beacon)); local->beacon.mhr.fc.type = IEEE802154_FC_TYPE_BEACON; local->beacon.mhr.fc.security_enabled = 0; local->beacon.mhr.fc.frame_pending = 0; local->beacon.mhr.fc.ack_request = 0; local->beacon.mhr.fc.intra_pan = 0; local->beacon.mhr.fc.dest_addr_mode = IEEE802154_NO_ADDRESSING; local->beacon.mhr.fc.version = IEEE802154_2003_STD; local->beacon.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING; atomic_set(&request->wpan_dev->bsn, -1); local->beacon.mhr.source.mode = IEEE802154_ADDR_LONG; local->beacon.mhr.source.pan_id = request->wpan_dev->pan_id; local->beacon.mhr.source.extended_addr = request->wpan_dev->extended_addr; local->beacon.mac_pl.beacon_order = request->interval; if (request->interval <= IEEE802154_MAX_SCAN_DURATION) local->beacon.mac_pl.superframe_order = request->interval; local->beacon.mac_pl.final_cap_slot = 0xf; local->beacon.mac_pl.battery_life_ext = 0; local->beacon.mac_pl.pan_coordinator = !wpan_dev->parent; local->beacon.mac_pl.assoc_permit = 1; if (request->interval == IEEE802154_ACTIVE_SCAN_DURATION) return 0; /* Start the beacon work */ local->beacon_interval = mac802154_scan_get_channel_time(request->interval, request->wpan_phy->symbol_duration); queue_delayed_work(local->mac_wq, &local->beacon_work, 0); return 0; } int mac802154_perform_association(struct ieee802154_sub_if_data *sdata, struct ieee802154_pan_device *coord, __le16 *short_addr) { 
u64 ceaddr = swab64((__force u64)coord->extended_addr); struct ieee802154_association_req_frame frame = {}; struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct sk_buff *skb; int ret; frame.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; frame.mhr.fc.security_enabled = 0; frame.mhr.fc.frame_pending = 0; frame.mhr.fc.ack_request = 1; /* We always expect an ack here */ frame.mhr.fc.intra_pan = 0; frame.mhr.fc.dest_addr_mode = (coord->mode == IEEE802154_ADDR_LONG) ? IEEE802154_EXTENDED_ADDRESSING : IEEE802154_SHORT_ADDRESSING; frame.mhr.fc.version = IEEE802154_2003_STD; frame.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING; frame.mhr.source.mode = IEEE802154_ADDR_LONG; frame.mhr.source.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); frame.mhr.source.extended_addr = wpan_dev->extended_addr; frame.mhr.dest.mode = coord->mode; frame.mhr.dest.pan_id = coord->pan_id; if (coord->mode == IEEE802154_ADDR_LONG) frame.mhr.dest.extended_addr = coord->extended_addr; else frame.mhr.dest.short_addr = coord->short_addr; frame.mhr.seq = atomic_inc_return(&wpan_dev->dsn) & 0xFF; frame.mac_pl.cmd_id = IEEE802154_CMD_ASSOCIATION_REQ; frame.assoc_req_pl.device_type = 1; frame.assoc_req_pl.power_source = 1; frame.assoc_req_pl.rx_on_when_idle = 1; frame.assoc_req_pl.alloc_addr = 1; skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ + sizeof(frame.assoc_req_pl), GFP_KERNEL); if (!skb) return -ENOBUFS; skb->dev = sdata->dev; ret = ieee802154_mac_cmd_push(skb, &frame, &frame.assoc_req_pl, sizeof(frame.assoc_req_pl)); if (ret) { kfree_skb(skb); return ret; } local->assoc_dev = coord; reinit_completion(&local->assoc_done); set_bit(IEEE802154_IS_ASSOCIATING, &local->ongoing); ret = ieee802154_mlme_tx_one_locked(local, sdata, skb); if (ret) { if (ret > 0) ret = (ret == IEEE802154_NO_ACK) ? -EREMOTEIO : -EIO; dev_warn(&sdata->dev->dev, "No ASSOC REQ ACK received from %8phC\n", &ceaddr); goto clear_assoc; } ret = wait_for_completion_killable_timeout(&local->assoc_done, 10 * HZ); if (ret <= 0) { dev_warn(&sdata->dev->dev, "No ASSOC RESP received from %8phC\n", &ceaddr); ret = -ETIMEDOUT; goto clear_assoc; } if (local->assoc_status != IEEE802154_ASSOCIATION_SUCCESSFUL) { if (local->assoc_status == IEEE802154_PAN_AT_CAPACITY) ret = -ERANGE; else ret = -EPERM; dev_warn(&sdata->dev->dev, "Negative ASSOC RESP received from %8phC: %s\n", &ceaddr, local->assoc_status == IEEE802154_PAN_AT_CAPACITY ? 
"PAN at capacity" : "access denied"); } ret = 0; *short_addr = local->assoc_addr; clear_assoc: clear_bit(IEEE802154_IS_ASSOCIATING, &local->ongoing); local->assoc_dev = NULL; return ret; } int mac802154_process_association_resp(struct ieee802154_sub_if_data *sdata, struct sk_buff *skb) { struct ieee802154_addr *src = &mac_cb(skb)->source; struct ieee802154_addr *dest = &mac_cb(skb)->dest; u64 deaddr = swab64((__force u64)dest->extended_addr); struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct ieee802154_assoc_resp_pl resp_pl = {}; if (skb->len != sizeof(resp_pl)) return -EINVAL; if (unlikely(src->mode != IEEE802154_EXTENDED_ADDRESSING || dest->mode != IEEE802154_EXTENDED_ADDRESSING)) return -EINVAL; if (unlikely(dest->extended_addr != wpan_dev->extended_addr || src->extended_addr != local->assoc_dev->extended_addr)) return -ENODEV; memcpy(&resp_pl, skb->data, sizeof(resp_pl)); local->assoc_addr = resp_pl.short_addr; local->assoc_status = resp_pl.status; dev_dbg(&skb->dev->dev, "ASSOC RESP 0x%x received from %8phC, getting short address %04x\n", local->assoc_status, &deaddr, local->assoc_addr); complete(&local->assoc_done); return 0; } int mac802154_send_disassociation_notif(struct ieee802154_sub_if_data *sdata, struct ieee802154_pan_device *target, u8 reason) { struct ieee802154_disassociation_notif_frame frame = {}; u64 teaddr = swab64((__force u64)target->extended_addr); struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct sk_buff *skb; int ret; frame.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; frame.mhr.fc.security_enabled = 0; frame.mhr.fc.frame_pending = 0; frame.mhr.fc.ack_request = 1; frame.mhr.fc.intra_pan = 1; frame.mhr.fc.dest_addr_mode = (target->mode == IEEE802154_ADDR_LONG) ? IEEE802154_EXTENDED_ADDRESSING : IEEE802154_SHORT_ADDRESSING; frame.mhr.fc.version = IEEE802154_2003_STD; frame.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING; frame.mhr.source.mode = IEEE802154_ADDR_LONG; frame.mhr.source.pan_id = wpan_dev->pan_id; frame.mhr.source.extended_addr = wpan_dev->extended_addr; frame.mhr.dest.mode = target->mode; frame.mhr.dest.pan_id = wpan_dev->pan_id; if (target->mode == IEEE802154_ADDR_LONG) frame.mhr.dest.extended_addr = target->extended_addr; else frame.mhr.dest.short_addr = target->short_addr; frame.mhr.seq = atomic_inc_return(&wpan_dev->dsn) & 0xFF; frame.mac_pl.cmd_id = IEEE802154_CMD_DISASSOCIATION_NOTIFY; frame.disassoc_pl = reason; skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ + sizeof(frame.disassoc_pl), GFP_KERNEL); if (!skb) return -ENOBUFS; skb->dev = sdata->dev; ret = ieee802154_mac_cmd_push(skb, &frame, &frame.disassoc_pl, sizeof(frame.disassoc_pl)); if (ret) { kfree_skb(skb); return ret; } ret = ieee802154_mlme_tx_one_locked(local, sdata, skb); if (ret) { dev_warn(&sdata->dev->dev, "No DISASSOC ACK received from %8phC\n", &teaddr); if (ret > 0) ret = (ret == IEEE802154_NO_ACK) ? 
-EREMOTEIO : -EIO; return ret; } dev_dbg(&sdata->dev->dev, "DISASSOC ACK received from %8phC\n", &teaddr); return 0; } static int mac802154_send_association_resp_locked(struct ieee802154_sub_if_data *sdata, struct ieee802154_pan_device *target, struct ieee802154_assoc_resp_pl *assoc_resp_pl) { u64 teaddr = swab64((__force u64)target->extended_addr); struct ieee802154_association_resp_frame frame = {}; struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct sk_buff *skb; int ret; frame.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; frame.mhr.fc.security_enabled = 0; frame.mhr.fc.frame_pending = 0; frame.mhr.fc.ack_request = 1; /* We always expect an ack here */ frame.mhr.fc.intra_pan = 1; frame.mhr.fc.dest_addr_mode = IEEE802154_EXTENDED_ADDRESSING; frame.mhr.fc.version = IEEE802154_2003_STD; frame.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING; frame.mhr.source.mode = IEEE802154_ADDR_LONG; frame.mhr.source.extended_addr = wpan_dev->extended_addr; frame.mhr.dest.mode = IEEE802154_ADDR_LONG; frame.mhr.dest.pan_id = wpan_dev->pan_id; frame.mhr.dest.extended_addr = target->extended_addr; frame.mhr.seq = atomic_inc_return(&wpan_dev->dsn) & 0xFF; frame.mac_pl.cmd_id = IEEE802154_CMD_ASSOCIATION_RESP; skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ + sizeof(*assoc_resp_pl), GFP_KERNEL); if (!skb) return -ENOBUFS; skb->dev = sdata->dev; ret = ieee802154_mac_cmd_push(skb, &frame, assoc_resp_pl, sizeof(*assoc_resp_pl)); if (ret) { kfree_skb(skb); return ret; } ret = ieee802154_mlme_tx_locked(local, sdata, skb); if (ret) { dev_warn(&sdata->dev->dev, "No ASSOC RESP ACK received from %8phC\n", &teaddr); if (ret > 0) ret = (ret == IEEE802154_NO_ACK) ? -EREMOTEIO : -EIO; return ret; } return 0; } int mac802154_process_association_req(struct ieee802154_sub_if_data *sdata, struct sk_buff *skb) { struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct ieee802154_addr *src = &mac_cb(skb)->source; struct ieee802154_addr *dest = &mac_cb(skb)->dest; struct ieee802154_assoc_resp_pl assoc_resp_pl = {}; struct ieee802154_assoc_req_pl assoc_req_pl; struct ieee802154_pan_device *child, *exchild; struct ieee802154_addr tmp = {}; u64 ceaddr; int ret; if (skb->len != sizeof(assoc_req_pl)) return -EINVAL; if (unlikely(src->mode != IEEE802154_EXTENDED_ADDRESSING)) return -EINVAL; if (unlikely(dest->pan_id != wpan_dev->pan_id)) return -ENODEV; if (dest->mode == IEEE802154_EXTENDED_ADDRESSING && unlikely(dest->extended_addr != wpan_dev->extended_addr)) return -ENODEV; else if (dest->mode == IEEE802154_SHORT_ADDRESSING && unlikely(dest->short_addr != wpan_dev->short_addr)) return -ENODEV; if (wpan_dev->parent) { dev_dbg(&sdata->dev->dev, "Ignoring ASSOC REQ, not the PAN coordinator\n"); return -ENODEV; } mutex_lock(&wpan_dev->association_lock); memcpy(&assoc_req_pl, skb->data, sizeof(assoc_req_pl)); if (assoc_req_pl.assoc_type) { dev_err(&skb->dev->dev, "Fast associations not supported yet\n"); ret = -EOPNOTSUPP; goto unlock; } child = kzalloc(sizeof(*child), GFP_KERNEL); if (!child) { ret = -ENOMEM; goto unlock; } child->extended_addr = src->extended_addr; child->mode = IEEE802154_EXTENDED_ADDRESSING; ceaddr = swab64((__force u64)child->extended_addr); if (wpan_dev->nchildren >= wpan_dev->max_associations) { if (!wpan_dev->max_associations) assoc_resp_pl.status = IEEE802154_PAN_ACCESS_DENIED; else assoc_resp_pl.status = IEEE802154_PAN_AT_CAPACITY; assoc_resp_pl.short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST); dev_dbg(&sdata->dev->dev, "Refusing ASSOC REQ from child %8phC, 
%s\n", &ceaddr, assoc_resp_pl.status == IEEE802154_PAN_ACCESS_DENIED ? "access denied" : "too many children"); } else { assoc_resp_pl.status = IEEE802154_ASSOCIATION_SUCCESSFUL; if (assoc_req_pl.alloc_addr) { assoc_resp_pl.short_addr = cfg802154_get_free_short_addr(wpan_dev); child->mode = IEEE802154_SHORT_ADDRESSING; } else { assoc_resp_pl.short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC); } child->short_addr = assoc_resp_pl.short_addr; dev_dbg(&sdata->dev->dev, "Accepting ASSOC REQ from child %8phC, providing short address 0x%04x\n", &ceaddr, le16_to_cpu(child->short_addr)); } ret = mac802154_send_association_resp_locked(sdata, child, &assoc_resp_pl); if (ret || assoc_resp_pl.status != IEEE802154_ASSOCIATION_SUCCESSFUL) { kfree(child); goto unlock; } dev_dbg(&sdata->dev->dev, "Successful association with new child %8phC\n", &ceaddr); /* Ensure this child is not already associated (might happen due to * retransmissions), in this case drop the ex structure. */ tmp.mode = child->mode; tmp.extended_addr = child->extended_addr; exchild = cfg802154_device_is_child(wpan_dev, &tmp); if (exchild) { dev_dbg(&sdata->dev->dev, "Child %8phC was already known\n", &ceaddr); list_del(&exchild->node); } list_add(&child->node, &wpan_dev->children); wpan_dev->nchildren++; unlock: mutex_unlock(&wpan_dev->association_lock); return ret; } int mac802154_process_disassociation_notif(struct ieee802154_sub_if_data *sdata, struct sk_buff *skb) { struct ieee802154_addr *src = &mac_cb(skb)->source; struct ieee802154_addr *dest = &mac_cb(skb)->dest; struct wpan_dev *wpan_dev = &sdata->wpan_dev; struct ieee802154_pan_device *child; struct ieee802154_addr target; bool parent; u64 teaddr; if (skb->len != sizeof(u8)) return -EINVAL; if (unlikely(src->mode != IEEE802154_EXTENDED_ADDRESSING)) return -EINVAL; if (dest->mode == IEEE802154_EXTENDED_ADDRESSING && unlikely(dest->extended_addr != wpan_dev->extended_addr)) return -ENODEV; else if (dest->mode == IEEE802154_SHORT_ADDRESSING && unlikely(dest->short_addr != wpan_dev->short_addr)) return -ENODEV; if (dest->pan_id != wpan_dev->pan_id) return -ENODEV; target.mode = IEEE802154_EXTENDED_ADDRESSING; target.extended_addr = src->extended_addr; teaddr = swab64((__force u64)target.extended_addr); dev_dbg(&skb->dev->dev, "Processing DISASSOC NOTIF from %8phC\n", &teaddr); mutex_lock(&wpan_dev->association_lock); parent = cfg802154_device_is_parent(wpan_dev, &target); if (!parent) child = cfg802154_device_is_child(wpan_dev, &target); if (!parent && !child) { mutex_unlock(&wpan_dev->association_lock); return -EINVAL; } if (parent) { kfree(wpan_dev->parent); wpan_dev->parent = NULL; } else { list_del(&child->node); kfree(child); wpan_dev->nchildren--; } mutex_unlock(&wpan_dev->association_lock); return 0; }
// SPDX-License-Identifier: GPL-2.0 /* * Generic USB GNSS receiver driver * * Copyright (C) 2021 Johan Hovold <johan@kernel.org> */ #include <linux/errno.h> #include <linux/gnss.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #define GNSS_USB_READ_BUF_LEN 512 #define GNSS_USB_WRITE_TIMEOUT 1000 static const struct usb_device_id gnss_usb_id_table[] = { { USB_DEVICE(0x1199, 0xb000) }, /* Sierra Wireless XM1210 */ { } }; MODULE_DEVICE_TABLE(usb, gnss_usb_id_table); struct gnss_usb { struct usb_device *udev; struct usb_interface *intf; struct gnss_device *gdev; struct urb *read_urb; unsigned int write_pipe; }; static void gnss_usb_rx_complete(struct urb *urb) { struct gnss_usb *gusb = urb->context; struct gnss_device *gdev = gusb->gdev; int status = urb->status; int len; int ret; switch (status) { case 0: break; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: dev_dbg(&gdev->dev, "urb stopped: %d\n", status); return; case -EPIPE: dev_err(&gdev->dev, "urb stopped: %d\n", status); return; default: dev_dbg(&gdev->dev, "nonzero urb status: %d\n", status); goto resubmit; } len = urb->actual_length; if (len == 0) goto resubmit; ret = gnss_insert_raw(gdev, urb->transfer_buffer, len); if (ret < len) dev_dbg(&gdev->dev, "dropped %d bytes\n", len - ret); resubmit: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret && ret != -EPERM && ret != -ENODEV) dev_err(&gdev->dev, "failed to resubmit urb: %d\n", ret); } static int gnss_usb_open(struct gnss_device *gdev) { struct gnss_usb *gusb = gnss_get_drvdata(gdev); int ret; ret = usb_submit_urb(gusb->read_urb, GFP_KERNEL); if (ret) { if (ret != -EPERM && ret != -ENODEV) dev_err(&gdev->dev, "failed to submit urb: %d\n", ret); return ret; } return 0; } static void gnss_usb_close(struct gnss_device *gdev) { struct gnss_usb *gusb = gnss_get_drvdata(gdev); usb_kill_urb(gusb->read_urb); } static int gnss_usb_write_raw(struct gnss_device *gdev, const unsigned char *buf, size_t count) { struct gnss_usb *gusb = gnss_get_drvdata(gdev); void *tbuf; int ret; tbuf = kmemdup(buf, count, GFP_KERNEL); if (!tbuf) return -ENOMEM; ret = usb_bulk_msg(gusb->udev, gusb->write_pipe, tbuf, count, NULL, GNSS_USB_WRITE_TIMEOUT); kfree(tbuf); if (ret) return ret; return count; } static const struct gnss_operations gnss_usb_gnss_ops = { .open = gnss_usb_open, .close = gnss_usb_close, .write_raw = gnss_usb_write_raw, }; static int gnss_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *in, *out; struct gnss_device *gdev; struct gnss_usb *gusb; struct urb *urb; size_t buf_len; void *buf; int ret; ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL); if
(ret) return ret; gusb = kzalloc(sizeof(*gusb), GFP_KERNEL); if (!gusb) return -ENOMEM; gdev = gnss_allocate_device(&intf->dev); if (!gdev) { ret = -ENOMEM; goto err_free_gusb; } gdev->ops = &gnss_usb_gnss_ops; gdev->type = GNSS_TYPE_NMEA; gnss_set_drvdata(gdev, gusb); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { ret = -ENOMEM; goto err_put_gdev; } buf_len = max(usb_endpoint_maxp(in), GNSS_USB_READ_BUF_LEN); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_free_urb; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, usb_endpoint_num(in)), buf, buf_len, gnss_usb_rx_complete, gusb); gusb->intf = intf; gusb->udev = udev; gusb->gdev = gdev; gusb->read_urb = urb; gusb->write_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(out)); ret = gnss_register_device(gdev); if (ret) goto err_free_buf; usb_set_intfdata(intf, gusb); return 0; err_free_buf: kfree(buf); err_free_urb: usb_free_urb(urb); err_put_gdev: gnss_put_device(gdev); err_free_gusb: kfree(gusb); return ret; } static void gnss_usb_disconnect(struct usb_interface *intf) { struct gnss_usb *gusb = usb_get_intfdata(intf); gnss_deregister_device(gusb->gdev); kfree(gusb->read_urb->transfer_buffer); usb_free_urb(gusb->read_urb); gnss_put_device(gusb->gdev); kfree(gusb); } static struct usb_driver gnss_usb_driver = { .name = "gnss-usb", .probe = gnss_usb_probe, .disconnect = gnss_usb_disconnect, .id_table = gnss_usb_id_table, }; module_usb_driver(gnss_usb_driver); MODULE_AUTHOR("Johan Hovold <johan@kernel.org>"); MODULE_DESCRIPTION("Generic USB GNSS receiver driver"); MODULE_LICENSE("GPL v2");
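/*
 * Illustrative sketch, not part of the driver above (userspace): bytes
 * pushed with gnss_insert_raw() surface through the GNSS subsystem's
 * character device. Assuming the receiver above was registered as
 * /dev/gnss0 (the actual index depends on registration order), a minimal
 * NMEA reader looks like this.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd = open("/dev/gnss0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Raw NMEA sentences, exactly as read from the bulk-in endpoint */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}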
// SPDX-License-Identifier: GPL-2.0-or-later /* * Virtio PCI driver - common functionality for all device versions * * This module allows virtio devices to be used over a virtual PCI device.
* This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 * Copyright Red Hat, Inc. 2014 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> * Rusty Russell <rusty@rustcorp.com.au> * Michael S. Tsirkin <mst@redhat.com> */ #include "virtio_pci_common.h" static bool force_legacy = false; #if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) module_param(force_legacy, bool, 0444); MODULE_PARM_DESC(force_legacy, "Force legacy mode for transitional virtio 1 devices"); #endif bool vp_is_avq(struct virtio_device *vdev, unsigned int index) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) return false; return index == vp_dev->admin_vq.vq_index; } /* wait for pending irq handlers */ void vp_synchronize_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) synchronize_irq(vp_dev->pci_dev->irq); for (i = 0; i < vp_dev->msix_vectors; ++i) synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); } /* the notify function used when creating a virt queue */ bool vp_notify(struct virtqueue *vq) { /* we write the queue's selector into the notification register to * signal the other end */ iowrite16(vq->index, (void __iomem *)vq->priv); return true; } /* Notify all slow path virtqueues on an interrupt. */ static void vp_vring_slow_path_interrupt(int irq, struct virtio_pci_device *vp_dev) { struct virtio_pci_vq_info *info; unsigned long flags; spin_lock_irqsave(&vp_dev->lock, flags); list_for_each_entry(info, &vp_dev->slow_virtqueues, node) vring_interrupt(irq, info->vq); spin_unlock_irqrestore(&vp_dev->lock, flags); } /* Handle a configuration change: Tell driver if it wants to know. */ static irqreturn_t vp_config_changed(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; virtio_config_changed(&vp_dev->vdev); vp_vring_slow_path_interrupt(irq, vp_dev); return IRQ_HANDLED; } /* Notify all virtqueues on an interrupt. */ static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; unsigned long flags; spin_lock_irqsave(&vp_dev->lock, flags); list_for_each_entry(info, &vp_dev->virtqueues, node) { if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } /* A small wrapper to also acknowledge the interrupt when it's handled. * I really need an EIO hook for the vring so I can ack the interrupt once we * know that we'll be handling the IRQ but before we invoke the callback since * the callback may notify the host which results in the host attempting to * raise an interrupt that we would then mask once we acknowledged the * interrupt. */ static irqreturn_t vp_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; u8 isr; /* reading the ISR has the effect of also clearing it so it's very * important to save off the value. */ isr = ioread8(vp_dev->isr); /* It's definitely not us if the ISR was not high */ if (!isr) return IRQ_NONE; /* Configuration change? Tell driver if it wants to know. 
*/ if (isr & VIRTIO_PCI_ISR_CONFIG) vp_config_changed(irq, opaque); return vp_vring_interrupt(irq, opaque); } static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, bool per_vq_vectors, struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); unsigned int flags = PCI_IRQ_MSIX; unsigned int i, v; int err = -ENOMEM; vp_dev->msix_vectors = nvectors; vp_dev->msix_names = kmalloc_array(nvectors, sizeof(*vp_dev->msix_names), GFP_KERNEL); if (!vp_dev->msix_names) goto error; vp_dev->msix_affinity_masks = kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL); if (!vp_dev->msix_affinity_masks) goto error; for (i = 0; i < nvectors; ++i) if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], GFP_KERNEL)) goto error; if (!per_vq_vectors) desc = NULL; if (desc) { flags |= PCI_IRQ_AFFINITY; desc->pre_vectors++; /* virtio config vector */ } err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, nvectors, flags, desc); if (err < 0) goto error; vp_dev->msix_enabled = 1; /* Set the vector used for configuration */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-config", name); err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), vp_config_changed, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; v = vp_dev->config_vector(vp_dev, v); /* Verify we had enough resources to assign the vector */ if (v == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto error; } if (!per_vq_vectors) { /* Shared vector for all VQs */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-virtqueues", name); err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), vp_vring_interrupt, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; } return 0; error: return err; } static bool vp_is_slow_path_vector(u16 msix_vec) { return msix_vec == VP_MSIX_CONFIG_VECTOR; } static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index, void (*callback)(struct virtqueue *vq), const char *name, bool ctx, u16 msix_vec, struct virtio_pci_vq_info **p_info) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); struct virtqueue *vq; unsigned long flags; /* fill out our structure that represents an active queue */ if (!info) return ERR_PTR(-ENOMEM); vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx, msix_vec); if (IS_ERR(vq)) goto out_info; info->vq = vq; if (callback) { spin_lock_irqsave(&vp_dev->lock, flags); if (!vp_is_slow_path_vector(msix_vec)) list_add(&info->node, &vp_dev->virtqueues); else list_add(&info->node, &vp_dev->slow_virtqueues); spin_unlock_irqrestore(&vp_dev->lock, flags); } else { INIT_LIST_HEAD(&info->node); } *p_info = info; return vq; out_info: kfree(info); return vq; } static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); unsigned long flags; /* * If it fails during re-enable reset vq. This way we won't rejoin * info->node to the queue. Prevent unexpected irqs. 
*/ if (!vq->reset) { spin_lock_irqsave(&vp_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); } vp_dev->del_vq(info); kfree(info); } /* the config->del_vqs() implementation */ void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; struct virtqueue *vq, *n; int i; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { info = vp_is_avq(vdev, vq->index) ? vp_dev->admin_vq.info : vp_dev->vqs[vq->index]; if (vp_dev->per_vq_vectors) { int v = info->msix_vector; if (v != VIRTIO_MSI_NO_VECTOR && !vp_is_slow_path_vector(v)) { int irq = pci_irq_vector(vp_dev->pci_dev, v); irq_update_affinity_hint(irq, NULL); free_irq(irq, vq); } } vp_del_vq(vq, info); } vp_dev->per_vq_vectors = false; if (vp_dev->intx_enabled) { free_irq(vp_dev->pci_dev->irq, vp_dev); vp_dev->intx_enabled = 0; } for (i = 0; i < vp_dev->msix_used_vectors; ++i) free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); if (vp_dev->msix_affinity_masks) { for (i = 0; i < vp_dev->msix_vectors; i++) free_cpumask_var(vp_dev->msix_affinity_masks[i]); } if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); pci_free_irq_vectors(vp_dev->pci_dev); vp_dev->msix_enabled = 0; } vp_dev->msix_vectors = 0; vp_dev->msix_used_vectors = 0; kfree(vp_dev->msix_names); vp_dev->msix_names = NULL; kfree(vp_dev->msix_affinity_masks); vp_dev->msix_affinity_masks = NULL; kfree(vp_dev->vqs); vp_dev->vqs = NULL; } enum vp_vq_vector_policy { VP_VQ_VECTOR_POLICY_EACH, VP_VQ_VECTOR_POLICY_SHARED_SLOW, VP_VQ_VECTOR_POLICY_SHARED, }; static struct virtqueue * vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx, vq_callback_t *callback, const char *name, bool ctx, bool slow_path, int *allocated_vectors, enum vp_vq_vector_policy vector_policy, struct virtio_pci_vq_info **p_info) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq; u16 msix_vec; int err; if (!callback) msix_vec = VIRTIO_MSI_NO_VECTOR; else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH || (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW && !slow_path)) msix_vec = (*allocated_vectors)++; else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH && slow_path) msix_vec = VP_MSIX_CONFIG_VECTOR; else msix_vec = VP_MSIX_VQ_VECTOR; vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec, p_info); if (IS_ERR(vq)) return vq; if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED || msix_vec == VIRTIO_MSI_NO_VECTOR || vp_is_slow_path_vector(msix_vec)) return vq; /* allocate per-vq irq if available and necessary */ snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names), "%s-%s", dev_name(&vp_dev->vdev.dev), name); err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), vring_interrupt, 0, vp_dev->msix_names[msix_vec], vq); if (err) { vp_del_vq(vq, *p_info); return ERR_PTR(err); } return vq; } static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], struct virtqueue_info vqs_info[], enum vp_vq_vector_policy vector_policy, struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq; struct virtqueue_info *vqi; int i, err, nvectors, allocated_vectors, queue_idx = 0; struct virtqueue *vq; bool per_vq_vectors; u16 avq_num = 0; vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); if (!vp_dev->vqs) return -ENOMEM; if (vp_dev->avq_index) { err = vp_dev->avq_index(vdev, 
&avq->vq_index, &avq_num); if (err) goto error_find; } per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED; if (per_vq_vectors) { /* Best option: one for change interrupt, one per vq. */ nvectors = 1; for (i = 0; i < nvqs; ++i) { vqi = &vqs_info[i]; if (vqi->name && vqi->callback) ++nvectors; } if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH) ++nvectors; } else { /* Second best: one for change, shared for all vqs. */ nvectors = 2; } err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc); if (err) goto error_find; vp_dev->per_vq_vectors = per_vq_vectors; allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { vqi = &vqs_info[i]; if (!vqi->name) { vqs[i] = NULL; continue; } vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback, vqi->name, vqi->ctx, false, &allocated_vectors, vector_policy, &vp_dev->vqs[i]); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error_find; } } if (!avq_num) return 0; sprintf(avq->name, "avq.%u", avq->vq_index); vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done, avq->name, false, true, &allocated_vectors, vector_policy, &vp_dev->admin_vq.info); if (IS_ERR(vq)) { err = PTR_ERR(vq); goto error_find; } return 0; error_find: vp_del_vqs(vdev); return err; } static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], struct virtqueue_info vqs_info[]) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq; int i, err, queue_idx = 0; struct virtqueue *vq; u16 avq_num = 0; vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); if (!vp_dev->vqs) return -ENOMEM; if (vp_dev->avq_index) { err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num); if (err) goto out_del_vqs; } err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (err) goto out_del_vqs; vp_dev->intx_enabled = 1; vp_dev->per_vq_vectors = false; for (i = 0; i < nvqs; ++i) { struct virtqueue_info *vqi = &vqs_info[i]; if (!vqi->name) { vqs[i] = NULL; continue; } vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback, vqi->name, vqi->ctx, VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto out_del_vqs; } } if (!avq_num) return 0; sprintf(avq->name, "avq.%u", avq->vq_index); vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name, false, VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info); if (IS_ERR(vq)) { err = PTR_ERR(vq); goto out_del_vqs; } return 0; out_del_vqs: vp_del_vqs(vdev); return err; } /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], struct virtqueue_info vqs_info[], struct irq_affinity *desc) { int err; /* Try MSI-X with one vector per queue. */ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, VP_VQ_VECTOR_POLICY_EACH, desc); if (!err) return 0; /* Fallback: MSI-X with one shared vector for config and * slow path queues, one vector per queue for the rest. */ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc); if (!err) return 0; /* Fallback: MSI-X with one vector for config, one shared for queues. */ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, VP_VQ_VECTOR_POLICY_SHARED, desc); if (!err) return 0; /* Is there an interrupt? If not give up. */ if (!(to_vp_device(vdev)->pci_dev->irq)) return err; /* Finally fall back to regular interrupts. 
*/ return vp_find_vqs_intx(vdev, nvqs, vqs, vqs_info); } const char *vp_bus_name(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return pci_name(vp_dev->pci_dev); } /* Set up the affinity for a virtqueue: * - force the affinity for per vq vector * - OR over all affinities for shared MSI * - ignore the affinity request if we're using INTX */ int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; struct cpumask *mask; unsigned int irq; if (!vq->callback) return -EINVAL; if (vp_dev->msix_enabled) { mask = vp_dev->msix_affinity_masks[info->msix_vector]; irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector); if (!cpu_mask) irq_update_affinity_hint(irq, NULL); else { cpumask_copy(mask, cpu_mask); irq_set_affinity_and_hint(irq, mask); } } return 0; } const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); if (!vp_dev->per_vq_vectors || vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR || vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector)) return NULL; return pci_irq_get_affinity(vp_dev->pci_dev, vp_dev->vqs[index]->msix_vector); } #ifdef CONFIG_PM_SLEEP static int virtio_pci_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); int ret; ret = virtio_device_freeze(&vp_dev->vdev); if (!ret) pci_disable_device(pci_dev); return ret; } static int virtio_pci_restore(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); int ret; ret = pci_enable_device(pci_dev); if (ret) return ret; pci_set_master(pci_dev); return virtio_device_restore(&vp_dev->vdev); } static bool vp_supports_pm_no_reset(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); u16 pmcsr; if (!pci_dev->pm_cap) return false; pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (PCI_POSSIBLE_ERROR(pmcsr)) { dev_err(dev, "Unable to query pmcsr"); return false; } return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET; } static int virtio_pci_suspend(struct device *dev) { return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev); } static int virtio_pci_resume(struct device *dev) { return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev); } static const struct dev_pm_ops virtio_pci_pm_ops = { .suspend = virtio_pci_suspend, .resume = virtio_pci_resume, .freeze = virtio_pci_freeze, .thaw = virtio_pci_restore, .poweroff = virtio_pci_freeze, .restore = virtio_pci_restore, }; #endif /* Qumranet donated their vendor ID for devices 0x1000 through 0x10FF. */ static const struct pci_device_id virtio_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); static void virtio_pci_release_dev(struct device *_d) { struct virtio_device *vdev = dev_to_virtio(_d); struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* As struct device is a kobject, it's not safe to * free the memory (including the reference counter itself) * until its release callback.
*/ kfree(vp_dev); } static int virtio_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct virtio_pci_device *vp_dev, *reg_dev = NULL; int rc; /* allocate our structure and fill it out */ vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); if (!vp_dev) return -ENOMEM; pci_set_drvdata(pci_dev, vp_dev); vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->pci_dev = pci_dev; INIT_LIST_HEAD(&vp_dev->virtqueues); INIT_LIST_HEAD(&vp_dev->slow_virtqueues); spin_lock_init(&vp_dev->lock); /* enable the device */ rc = pci_enable_device(pci_dev); if (rc) goto err_enable_device; if (force_legacy) { rc = virtio_pci_legacy_probe(vp_dev); /* Also try modern mode if we can't map BAR0 (no IO space). */ if (rc == -ENODEV || rc == -ENOMEM) rc = virtio_pci_modern_probe(vp_dev); if (rc) goto err_probe; } else { rc = virtio_pci_modern_probe(vp_dev); if (rc == -ENODEV) rc = virtio_pci_legacy_probe(vp_dev); if (rc) goto err_probe; } pci_set_master(pci_dev); rc = register_virtio_device(&vp_dev->vdev); reg_dev = vp_dev; if (rc) goto err_register; return 0; err_register: if (vp_dev->is_legacy) virtio_pci_legacy_remove(vp_dev); else virtio_pci_modern_remove(vp_dev); err_probe: pci_disable_device(pci_dev); err_enable_device: if (reg_dev) put_device(&vp_dev->vdev.dev); else kfree(vp_dev); return rc; } static void virtio_pci_remove(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct device *dev = get_device(&vp_dev->vdev.dev); /* * Device is marked broken on surprise removal so that virtio upper * layers can abort any ongoing operation. */ if (!pci_device_is_present(pci_dev)) virtio_break_device(&vp_dev->vdev); pci_disable_sriov(pci_dev); unregister_virtio_device(&vp_dev->vdev); if (vp_dev->is_legacy) virtio_pci_legacy_remove(vp_dev); else virtio_pci_modern_remove(vp_dev); pci_disable_device(pci_dev); put_device(dev); } static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_device *vdev = &vp_dev->vdev; int ret; if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK)) return -EBUSY; if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV)) return -EINVAL; if (pci_vfs_assigned(pci_dev)) return -EPERM; if (num_vfs == 0) { pci_disable_sriov(pci_dev); return 0; } ret = pci_enable_sriov(pci_dev, num_vfs); if (ret < 0) return ret; return num_vfs; } static void virtio_pci_reset_prepare(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); int ret = 0; ret = virtio_device_reset_prepare(&vp_dev->vdev); if (ret) { if (ret != -EOPNOTSUPP) dev_warn(&pci_dev->dev, "Reset prepare failure: %d", ret); return; } if (pci_is_enabled(pci_dev)) pci_disable_device(pci_dev); } static void virtio_pci_reset_done(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); int ret; if (pci_is_enabled(pci_dev)) return; ret = pci_enable_device(pci_dev); if (!ret) { pci_set_master(pci_dev); ret = virtio_device_reset_done(&vp_dev->vdev); } if (ret && ret != -EOPNOTSUPP) dev_warn(&pci_dev->dev, "Reset done failure: %d", ret); } static const struct pci_error_handlers virtio_pci_err_handler = { .reset_prepare = virtio_pci_reset_prepare, .reset_done = virtio_pci_reset_done, }; static struct pci_driver virtio_pci_driver = { .name = "virtio-pci", .id_table = virtio_pci_id_table, .probe = virtio_pci_probe, .remove = virtio_pci_remove, #ifdef CONFIG_PM_SLEEP .driver.pm = 
&virtio_pci_pm_ops, #endif .sriov_configure = virtio_pci_sriov_configure, .err_handler = &virtio_pci_err_handler, }; struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev) { struct virtio_pci_device *pf_vp_dev; pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver); if (IS_ERR(pf_vp_dev)) return NULL; return &pf_vp_dev->vdev; } module_pci_driver(virtio_pci_driver); MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>"); MODULE_DESCRIPTION("virtio-pci"); MODULE_LICENSE("GPL"); MODULE_VERSION("1");
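/*
 * Illustrative sketch, not part of the file above: how the MSI-X vector
 * budget is derived for the VP_VQ_VECTOR_POLICY_EACH case in
 * vp_find_vqs_msix() above. One vector is reserved for configuration
 * changes, plus one per virtqueue that actually has a callback (queues
 * without a name or callback get no vector). The helper name is
 * illustrative.
 */
static int sketch_count_msix_vectors(const struct virtqueue_info vqs_info[],
				     unsigned int nvqs)
{
	int nvectors = 1; /* the config change vector */
	unsigned int i;

	for (i = 0; i < nvqs; i++)
		if (vqs_info[i].name && vqs_info[i].callback)
			nvectors++;

	return nvectors;
}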
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ALSA sequencer Queue handling * Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl> */ #ifndef __SND_SEQ_QUEUE_H #define __SND_SEQ_QUEUE_H #include "seq_memory.h" #include "seq_prioq.h" #include "seq_timer.h" #include "seq_lock.h" #include <linux/interrupt.h> #include <linux/list.h> #include <linux/bitops.h> #define SEQ_QUEUE_NO_OWNER (-1) struct snd_seq_queue { int queue; /* queue number */ char name[64]; /* name of this queue */ struct snd_seq_prioq *tickq; /* midi tick event queue */ struct snd_seq_prioq *timeq; /* real-time event queue */ struct snd_seq_timer *timer; /* time keeper for this queue */ int owner; /* client that 'owns' the timer */ bool locked; /* timer is only accessible by owner if set */ bool klocked; /* kernel lock (after START) */ bool check_again; /* concurrent access happened during check */ bool check_blocked; /* queue being checked */ unsigned int flags; /* status flags */ unsigned int info_flags; /* info for sync */ spinlock_t owner_lock; spinlock_t check_lock; /* clients which use this queue (bitmap) */ DECLARE_BITMAP(clients_bitmap, SNDRV_SEQ_MAX_CLIENTS); unsigned int clients; /* users of this queue */ struct mutex timer_mutex; snd_use_lock_t use_lock; }; /* get the number of current queues */ int snd_seq_queue_get_cur_queues(void); /* delete queues */ void snd_seq_queues_delete(void); /* create new queue (constructor) */ struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags); /* delete queue (destructor) */ int snd_seq_queue_delete(int client, int queueid); /* final stage */ void snd_seq_queue_client_leave(int client); /* enqueue an event received from one of the clients */ int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop); /* Remove events */ void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info); /* return pointer to queue structure for specified id */ struct snd_seq_queue *queueptr(int queueid); /* unlock */ #define queuefree(q) snd_use_lock_free(&(q)->use_lock) DEFINE_FREE(snd_seq_queue, struct snd_seq_queue *, if (!IS_ERR_OR_NULL(_T)) queuefree(_T)) /* return the (first) queue matching with the specified name */ struct snd_seq_queue *snd_seq_queue_find_name(char *name); /* check single queue and dispatch events */ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop); /* access to queue's parameters */ int snd_seq_queue_check_access(int queueid, int client); int snd_seq_queue_timer_set_tempo(int queueid, int client, struct snd_seq_queue_tempo *info); int snd_seq_queue_set_owner(int queueid, int client, int locked); int snd_seq_queue_timer_open(int queueid); int snd_seq_queue_timer_close(int queueid); int snd_seq_queue_use(int queueid, int client, int use); int snd_seq_queue_is_used(int queueid, int client); int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop); #endif
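/*
 * Illustrative sketch, not part of the header above: the intended usage
 * pattern of the queue use-lock. queueptr() takes a reference that must
 * be released with queuefree(); the DEFINE_FREE() entry above lets
 * callers pair the two automatically via the __free() cleanup attribute
 * from <linux/cleanup.h>. The function name is illustrative.
 */
#include <linux/cleanup.h>

static int sketch_queue_access(int queueid)
{
	struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);

	if (!q)
		return -EINVAL;

	/* ... work with q->timer, q->tickq, q->timeq here ... */

	return 0; /* queuefree(q) runs automatically on every return path */
}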
// SPDX-License-Identifier: GPL-2.0 /* * When connected to the machine, the Thrustmaster wheels appear as * a «generic» hid gamepad called "Thrustmaster FFB Wheel". * * When in this mode, not all functionality of the wheel, such as the force feedback, * is available. To enable the full functionality of a Thrustmaster wheel we have to send * it a specific USB CONTROL request with a code that differs for each wheel. * * This driver tries to understand which model of Thrustmaster wheel the generic * "Thrustmaster FFB Wheel" really is and then sends the appropriate control code. * * Copyright (c) 2020-2021 Dario Pagani <dario.pagani.146+linuxk@gmail.com> * Copyright (c) 2020-2021 Kim Kuparinen <kimi.h.kuparinen@gmail.com> */ #include <linux/hid.h> #include <linux/usb.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/module.h> /* * These interrupts are used to prevent a nasty crash when initializing the * T300RS. Used in thrustmaster_interrupts(). */ static const u8 setup_0[] = { 0x42, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 setup_1[] = { 0x0a, 0x04, 0x90, 0x03, 0x00, 0x00, 0x00, 0x00 }; static const u8 setup_2[] = { 0x0a, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00 }; static const u8 setup_3[] = { 0x0a, 0x04, 0x12, 0x10, 0x00, 0x00, 0x00, 0x00 }; static const u8 setup_4[] = { 0x0a, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00 }; static const u8 *const setup_arr[] = { setup_0, setup_1, setup_2, setup_3, setup_4 }; static const unsigned int setup_arr_sizes[] = { ARRAY_SIZE(setup_0), ARRAY_SIZE(setup_1), ARRAY_SIZE(setup_2), ARRAY_SIZE(setup_3), ARRAY_SIZE(setup_4) }; /* * This struct describes, for each type of Thrustmaster wheel, the model * code, the value used to switch it to full-featured mode, and its name. * * Note: The values are stored in the CPU * endianness, the USB protocols always use * little endian; the macro cpu_to_le[BIT]() * must be used when preparing USB packets * and vice-versa */ struct tm_wheel_info { uint16_t wheel_type; /* * See when the USB control out packet is prepared...
* @TODO The TMX seems to require multiple control codes to switch. */ uint16_t switch_value; char const *const wheel_name; }; /* * Known wheels. * Note: TMX does not work as it requires 2 control packets */ static const struct tm_wheel_info tm_wheels_infos[] = { {0x0306, 0x0006, "Thrustmaster T150RS"}, {0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"}, {0x0206, 0x0005, "Thrustmaster T300RS"}, {0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"}, {0x020a, 0x0005, "Thrustmaster T300RS (Sparco R383 Mod)"}, {0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"}, {0x0002, 0x0002, "Thrustmaster T500RS"} //{0x0407, 0x0001, "Thrustmaster TMX"} }; static const uint8_t tm_wheels_infos_length = 7; /* * This struct contains (in little endian) the response data * of the wheel to request 73 * * Sufficient research to understand what each field does has not * been conducted yet. The position and meaning of the fields are * just a very optimistic guess based on instinct.... */ struct __packed tm_wheel_response { /* * Seems to be the type of packet * - 0x0049 if it is data.a (15 bytes) * - 0x0047 if it is data.b (7 bytes) */ uint16_t type; union { struct __packed { uint16_t field0; uint16_t field1; /* * Seems to be the model code of the wheel * See the tm_wheels_infos table for known values */ uint16_t model; uint16_t field2; uint16_t field3; uint16_t field4; uint16_t field5; } a; struct __packed { uint16_t field0; uint16_t field1; uint16_t model; } b; } data; }; struct tm_wheel { struct usb_device *usb_dev; struct urb *urb; struct usb_ctrlrequest *model_request; struct tm_wheel_response *response; struct usb_ctrlrequest *change_request; }; /* The control packet to send to the wheel */ static const struct usb_ctrlrequest model_request = { .bRequestType = 0xc1, .bRequest = 73, .wValue = 0, .wIndex = 0, .wLength = cpu_to_le16(0x0010) }; static const struct usb_ctrlrequest change_request = { .bRequestType = 0x41, .bRequest = 83, .wValue = 0, // Will be filled by the driver .wIndex = 0, .wLength = 0 }; /* * On some setups, initializing the T300RS crashes the kernel; these * interrupts fix that particular issue. So far they haven't caused any * adverse effects in other wheels. */ static void thrustmaster_interrupts(struct hid_device *hdev) { int ret, trans, i, b_ep; u8 *send_buf = kmalloc(256, GFP_KERNEL); struct usb_host_endpoint *ep; struct device *dev = &hdev->dev; struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); if (!send_buf) { hid_err(hdev, "failed allocating send buffer\n"); return; } if (usbif->cur_altsetting->desc.bNumEndpoints < 2) { kfree(send_buf); hid_err(hdev, "Wrong number of endpoints?\n"); return; } ep = &usbif->cur_altsetting->endpoint[1]; b_ep = ep->desc.bEndpointAddress; /* Are the expected endpoints present? */ u8 ep_addr[2] = {b_ep, 0}; if (!usb_check_int_endpoints(usbif, ep_addr)) { kfree(send_buf); hid_err(hdev, "Unexpected non-int endpoint\n"); return; } for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) { memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]); ret = usb_interrupt_msg(usbdev, usb_sndintpipe(usbdev, b_ep), send_buf, setup_arr_sizes[i], &trans, USB_CTRL_SET_TIMEOUT); if (ret) { hid_err(hdev, "setup data couldn't be sent\n"); kfree(send_buf); return; } } kfree(send_buf); } static void thrustmaster_change_handler(struct urb *urb) { struct hid_device *hdev = urb->context; // The wheel seems to kill itself before answering the host, thereby violating the USB protocol...
if (urb->status == 0 || urb->status == -EPROTO || urb->status == -EPIPE) hid_info(hdev, "Success?! The wheel should have been initialized!\n"); else hid_warn(hdev, "URB to change wheel mode seems to have failed with error %d\n", urb->status); } /* * Called by the USB subsystem when the wheel responds to our request * to get [what it seems to be] the wheel's model. * * If the model id is recognized then we send the appropriate USB CONTROL REQUEST * to switch the wheel to its full capabilities */ static void thrustmaster_model_handler(struct urb *urb) { struct hid_device *hdev = urb->context; struct tm_wheel *tm_wheel = hid_get_drvdata(hdev); uint16_t model = 0; int i, ret; const struct tm_wheel_info *twi = NULL; if (urb->status) { hid_err(hdev, "URB to get model id failed with error %d\n", urb->status); return; } if (tm_wheel->response->type == cpu_to_le16(0x49)) model = le16_to_cpu(tm_wheel->response->data.a.model); else if (tm_wheel->response->type == cpu_to_le16(0x47)) model = le16_to_cpu(tm_wheel->response->data.b.model); else { hid_err(hdev, "Unknown packet type 0x%x, unable to proceed further with wheel init\n", tm_wheel->response->type); return; } for (i = 0; i < tm_wheels_infos_length && !twi; i++) if (tm_wheels_infos[i].wheel_type == model) twi = tm_wheels_infos + i; if (twi) hid_info(hdev, "Wheel with model id 0x%x is a %s\n", model, twi->wheel_name); else { hid_err(hdev, "Unknown wheel model id 0x%x, unable to proceed further with wheel init\n", model); return; } tm_wheel->change_request->wValue = cpu_to_le16(twi->switch_value); usb_fill_control_urb( tm_wheel->urb, tm_wheel->usb_dev, usb_sndctrlpipe(tm_wheel->usb_dev, 0), (char *)tm_wheel->change_request, NULL, 0, // We do not expect any response from the wheel thrustmaster_change_handler, hdev ); ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC); if (ret) hid_err(hdev, "Error %d while submitting the change URB. I am unable to initialize this wheel...\n", ret); } static void thrustmaster_remove(struct hid_device *hdev) { struct tm_wheel *tm_wheel = hid_get_drvdata(hdev); usb_kill_urb(tm_wheel->urb); kfree(tm_wheel->change_request); kfree(tm_wheel->response); kfree(tm_wheel->model_request); usb_free_urb(tm_wheel->urb); kfree(tm_wheel); hid_hw_stop(hdev); } /* * Function called by HID when a HID Thrustmaster FFB wheel is connected to the host. * This function starts the hid dev, tries to allocate the tm_wheel data structure and * finally sends a USB CONTROL REQUEST to the wheel to get [what it seems to be] its * model type.
*/ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret = 0; struct tm_wheel *tm_wheel = NULL; if (!hid_is_usb(hdev)) return -EINVAL; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed with error %d\n", ret); goto error0; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed with error %d\n", ret); goto error0; } // Now we allocate the tm_wheel tm_wheel = kzalloc(sizeof(struct tm_wheel), GFP_KERNEL); if (!tm_wheel) { ret = -ENOMEM; goto error1; } tm_wheel->urb = usb_alloc_urb(0, GFP_ATOMIC); if (!tm_wheel->urb) { ret = -ENOMEM; goto error2; } tm_wheel->model_request = kmemdup(&model_request, sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!tm_wheel->model_request) { ret = -ENOMEM; goto error3; } tm_wheel->response = kzalloc(sizeof(struct tm_wheel_response), GFP_KERNEL); if (!tm_wheel->response) { ret = -ENOMEM; goto error4; } tm_wheel->change_request = kmemdup(&change_request, sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!tm_wheel->change_request) { ret = -ENOMEM; goto error5; } tm_wheel->usb_dev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); hid_set_drvdata(hdev, tm_wheel); thrustmaster_interrupts(hdev); usb_fill_control_urb( tm_wheel->urb, tm_wheel->usb_dev, usb_rcvctrlpipe(tm_wheel->usb_dev, 0), (char *)tm_wheel->model_request, tm_wheel->response, sizeof(struct tm_wheel_response), thrustmaster_model_handler, hdev ); ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC); if (ret) { hid_err(hdev, "Error %d while submitting the URB. I am unable to initialize this wheel...\n", ret); goto error6; } return ret; error6: kfree(tm_wheel->change_request); error5: kfree(tm_wheel->response); error4: kfree(tm_wheel->model_request); error3: usb_free_urb(tm_wheel->urb); error2: kfree(tm_wheel); error1: hid_hw_stop(hdev); error0: return ret; } static const struct hid_device_id thrustmaster_devices[] = { { HID_USB_DEVICE(0x044f, 0xb65d)}, {} }; MODULE_DEVICE_TABLE(hid, thrustmaster_devices); static struct hid_driver thrustmaster_driver = { .name = "hid-thrustmaster", .id_table = thrustmaster_devices, .probe = thrustmaster_probe, .remove = thrustmaster_remove, }; module_hid_driver(thrustmaster_driver); MODULE_AUTHOR("Dario Pagani <dario.pagani.146+linuxk@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver to initialize some steering wheel joysticks from Thrustmaster");
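/*
 * Illustrative sketch, not part of the driver above: the heart of the init
 * sequence is a lookup from the model id reported in response to request 73
 * to the wValue sent with request 83. A minimal standalone version of that
 * lookup (tm_wheel_lookup is a hypothetical name; tm_wheels_infos is the
 * table defined above):
 */
static const struct tm_wheel_info *tm_wheel_lookup(uint16_t model)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tm_wheels_infos); i++)
		if (tm_wheels_infos[i].wheel_type == model)
			return &tm_wheels_infos[i];

	return NULL;	/* unknown model: leave the wheel in generic gamepad mode */
}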
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2000-2002 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001-2002, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels.
*/ #include <linux/export.h> #include <linux/unaligned.h> #include "iforce.h" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>, Johann Deneux <johann.deneux@gmail.com>"); MODULE_DESCRIPTION("Core I-Force joysticks and wheels driver"); MODULE_LICENSE("GPL"); static signed short btn_joystick[] = { BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A, BTN_B, BTN_C, BTN_DEAD, -1 }; static signed short btn_joystick_avb[] = { BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_DEAD, -1 }; static signed short btn_wheel[] = { BTN_GEAR_DOWN, BTN_GEAR_UP, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_BASE6, -1 }; static signed short abs_joystick[] = { ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 }; static signed short abs_joystick_rudder[] = { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, -1 }; static signed short abs_avb_pegasus[] = { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, ABS_HAT1X, ABS_HAT1Y, -1 }; static signed short abs_wheel[] = { ABS_WHEEL, ABS_GAS, ABS_BRAKE, ABS_HAT0X, ABS_HAT0Y, -1 }; static signed short ff_iforce[] = { FF_PERIODIC, FF_CONSTANT, FF_SPRING, FF_DAMPER, FF_SQUARE, FF_TRIANGLE, FF_SINE, FF_SAW_UP, FF_SAW_DOWN, FF_GAIN, FF_AUTOCENTER, -1 }; static struct iforce_device iforce_device[] = { { 0x044f, 0xa01c, "Thrustmaster Motor Sport GT", btn_wheel, abs_wheel, ff_iforce }, { 0x046d, 0xc281, "Logitech WingMan Force", btn_joystick, abs_joystick, ff_iforce }, { 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce }, { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce }, { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce }, { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce }, { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, { 0x06a3, 0xff04, "Saitek R440 Force Wheel", btn_wheel, abs_wheel, ff_iforce }, //? { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? { 0x06f8, 0x0001, "Guillemot Jet Leader Force Feedback", btn_joystick, abs_joystick_rudder, ff_iforce }, { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? { 0x06f8, 0xa302, "Guillemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? 
{ 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce }, { 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce } }; static int iforce_playback(struct input_dev *dev, int effect_id, int value) { struct iforce *iforce = input_get_drvdata(dev); struct iforce_core_effect *core_effect = &iforce->core_effects[effect_id]; if (value > 0) set_bit(FF_CORE_SHOULD_PLAY, core_effect->flags); else clear_bit(FF_CORE_SHOULD_PLAY, core_effect->flags); iforce_control_playback(iforce, effect_id, value); return 0; } static void iforce_set_gain(struct input_dev *dev, u16 gain) { struct iforce *iforce = input_get_drvdata(dev); unsigned char data[3]; data[0] = gain >> 9; iforce_send_packet(iforce, FF_CMD_GAIN, data); } static void iforce_set_autocenter(struct input_dev *dev, u16 magnitude) { struct iforce *iforce = input_get_drvdata(dev); unsigned char data[3]; data[0] = 0x03; data[1] = magnitude >> 9; iforce_send_packet(iforce, FF_CMD_AUTOCENTER, data); data[0] = 0x04; data[1] = 0x01; iforce_send_packet(iforce, FF_CMD_AUTOCENTER, data); } /* * Function called when an ioctl is performed on the event dev entry. * It uploads an effect to the device */ static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) { struct iforce *iforce = input_get_drvdata(dev); struct iforce_core_effect *core_effect = &iforce->core_effects[effect->id]; int ret; if (__test_and_set_bit(FF_CORE_IS_USED, core_effect->flags)) { /* Check the effect is not already being updated */ if (test_bit(FF_CORE_UPDATE, core_effect->flags)) return -EAGAIN; } /* * Upload the effect */ switch (effect->type) { case FF_PERIODIC: ret = iforce_upload_periodic(iforce, effect, old); break; case FF_CONSTANT: ret = iforce_upload_constant(iforce, effect, old); break; case FF_SPRING: case FF_DAMPER: ret = iforce_upload_condition(iforce, effect, old); break; default: return -EINVAL; } if (ret == 0) { /* A packet was sent; forbid new updates until we are notified * that the packet was updated */ set_bit(FF_CORE_UPDATE, core_effect->flags); } return ret; } /* * Erases an effect: it frees the effect id and marks the memory * allocated for the parameters as unused */ static int iforce_erase_effect(struct input_dev *dev, int effect_id) { struct iforce *iforce = input_get_drvdata(dev); struct iforce_core_effect *core_effect = &iforce->core_effects[effect_id]; int err = 0; if (test_bit(FF_MOD1_IS_USED, core_effect->flags)) err = release_resource(&core_effect->mod1_chunk); if (!err && test_bit(FF_MOD2_IS_USED, core_effect->flags)) err = release_resource(&core_effect->mod2_chunk); /* TODO: remember to change that if more FF_MOD* bits are added */ core_effect->flags[0] = 0; return err; } static int iforce_open(struct input_dev *dev) { struct iforce *iforce = input_get_drvdata(dev); iforce->xport_ops->start_io(iforce); if (test_bit(EV_FF, dev->evbit)) { /* Enable force feedback */ iforce_send_packet(iforce, FF_CMD_ENABLE, "\004"); } return 0; } static void iforce_close(struct input_dev *dev) { struct iforce *iforce = input_get_drvdata(dev); int i; if (test_bit(EV_FF, dev->evbit)) { /* Check: no effects should be present in memory */ for (i = 0; i < dev->ff->max_effects; i++) { if (test_bit(FF_CORE_IS_USED, iforce->core_effects[i].flags)) { dev_warn(&dev->dev, "%s: Device still owns effects\n", __func__); break; } } /* Disable force feedback playback */ iforce_send_packet(iforce, FF_CMD_ENABLE, "\001"); /* Wait for the command to complete */
wait_event_interruptible(iforce->wait, !test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags)); } iforce->xport_ops->stop_io(iforce); } int iforce_init_device(struct device *parent, u16 bustype, struct iforce *iforce) { struct input_dev *input_dev; struct ff_device *ff; u8 c[] = "CEOV"; u8 buf[IFORCE_MAX_LENGTH]; size_t len; int i, error; int ff_effects = 0; input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; init_waitqueue_head(&iforce->wait); spin_lock_init(&iforce->xmit_lock); mutex_init(&iforce->mem_mutex); iforce->xmit.buf = iforce->xmit_data; iforce->dev = input_dev; /* * Input device fields. */ input_dev->id.bustype = bustype; input_dev->dev.parent = parent; input_set_drvdata(input_dev, iforce); input_dev->name = "Unknown I-Force device"; input_dev->open = iforce_open; input_dev->close = iforce_close; /* * On-device memory allocation. */ iforce->device_memory.name = "I-Force device effect memory"; iforce->device_memory.start = 0; iforce->device_memory.end = 200; iforce->device_memory.flags = IORESOURCE_MEM; iforce->device_memory.parent = NULL; iforce->device_memory.child = NULL; iforce->device_memory.sibling = NULL; /* * Wait until device ready - until it sends its first response. */ for (i = 0; i < 20; i++) if (!iforce_get_id_packet(iforce, 'O', buf, &len)) break; if (i == 20) { /* 5 seconds */ dev_err(&input_dev->dev, "Timeout waiting for response from device.\n"); error = -ENODEV; goto fail; } /* * Get device info. */ if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3) input_dev->id.vendor = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n"); if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3) input_dev->id.product = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n"); if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3) iforce->device_memory.end = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n"); if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2) ff_effects = buf[1]; else dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n"); /* Check if the device can store more effects than the driver can really handle */ if (ff_effects > IFORCE_EFFECTS_MAX) { dev_warn(&iforce->dev->dev, "Limiting number of effects to %d (device reports %d)\n", IFORCE_EFFECTS_MAX, ff_effects); ff_effects = IFORCE_EFFECTS_MAX; } /* * Display additional info. */ for (i = 0; c[i]; i++) if (!iforce_get_id_packet(iforce, c[i], buf, &len)) iforce_dump_packet(iforce, "info", (FF_CMD_QUERY & 0xff00) | len, buf); /* * Disable spring, enable force feedback. */ iforce_set_autocenter(input_dev, 0); /* * Find appropriate device entry */ for (i = 0; iforce_device[i].idvendor; i++) if (iforce_device[i].idvendor == input_dev->id.vendor && iforce_device[i].idproduct == input_dev->id.product) break; iforce->type = iforce_device + i; input_dev->name = iforce->type->name; /* * Set input device bitfields and ranges. 
*/ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) | BIT_MASK(EV_FF_STATUS); for (i = 0; iforce->type->btn[i] >= 0; i++) set_bit(iforce->type->btn[i], input_dev->keybit); for (i = 0; iforce->type->abs[i] >= 0; i++) { signed short t = iforce->type->abs[i]; switch (t) { case ABS_X: case ABS_Y: case ABS_WHEEL: input_set_abs_params(input_dev, t, -1920, 1920, 16, 128); set_bit(t, input_dev->ffbit); break; case ABS_THROTTLE: case ABS_GAS: case ABS_BRAKE: input_set_abs_params(input_dev, t, 0, 255, 0, 0); break; case ABS_RUDDER: input_set_abs_params(input_dev, t, -128, 127, 0, 0); break; case ABS_HAT0X: case ABS_HAT0Y: case ABS_HAT1X: case ABS_HAT1Y: input_set_abs_params(input_dev, t, -1, 1, 0, 0); break; } } if (ff_effects) { for (i = 0; iforce->type->ff[i] >= 0; i++) set_bit(iforce->type->ff[i], input_dev->ffbit); error = input_ff_create(input_dev, ff_effects); if (error) goto fail; ff = input_dev->ff; ff->upload = iforce_upload_effect; ff->erase = iforce_erase_effect; ff->set_gain = iforce_set_gain; ff->set_autocenter = iforce_set_autocenter; ff->playback = iforce_playback; } /* * Register input device. */ error = input_register_device(iforce->dev); if (error) goto fail; return 0; fail: input_free_device(input_dev); return error; } EXPORT_SYMBOL(iforce_init_device);
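/*
 * Illustrative note, not part of the driver above: iforce_set_gain() and
 * iforce_set_autocenter() shift the 16-bit input-core value right by 9,
 * which maps the 0..0xffff range onto the 7-bit range 0..0x7f apparently
 * expected by the I-Force protocol. A self-contained sketch of that
 * scaling (iforce_scale_to_7bit is a hypothetical helper name):
 */
static inline unsigned char iforce_scale_to_7bit(u16 value)
{
	/* 0x0000 -> 0x00, 0x8000 -> 0x40, 0xffff -> 0x7f */
	return value >> 9;
}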
/* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright (c) 2009-2010, Code Aurora Forum. * All rights reserved. * * Author: Rickard E. (Rik) Faith <faith@valinux.com> * Author: Gareth Hughes <gareth@valinux.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE.
*/ #ifndef _DRM_FILE_H_ #define _DRM_FILE_H_ #include <linux/types.h> #include <linux/completion.h> #include <linux/idr.h> #include <uapi/drm/drm.h> #include <drm/drm_prime.h> struct dma_fence; struct drm_file; struct drm_device; struct drm_printer; struct device; struct file; extern struct xarray drm_minors_xa; /* * FIXME: Not sure we want to have drm_minor here in the end, but to avoid * header include loops we need it here for now. */ /* Note that the values of this enum are ABI (it determines * /dev/dri/renderD* numbers). * * Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to * be implemented before we hit any future limit. */ enum drm_minor_type { DRM_MINOR_PRIMARY = 0, DRM_MINOR_CONTROL = 1, DRM_MINOR_RENDER = 2, DRM_MINOR_ACCEL = 32, }; /** * struct drm_minor - DRM device minor structure * * This structure represents a DRM minor number for device nodes in /dev. * It is entirely opaque to drivers and should never be inspected directly. * Drivers instead should only interact with &struct drm_file and of course * &struct drm_device, which is also where driver-private data and resources * can be attached. */ struct drm_minor { /* private: */ int index; /* Minor device number */ int type; /* Control or render or accel */ struct device *kdev; /* Linux device */ struct drm_device *dev; struct dentry *debugfs_symlink; struct dentry *debugfs_root; }; /** * struct drm_pending_event - Event queued up for userspace to read * * This represents a DRM event. Drivers can use this as a generic completion * mechanism, which supports kernel-internal &struct completion, &struct dma_fence * and also the DRM-specific &struct drm_event delivery mechanism. */ struct drm_pending_event { /** * @completion: * * Optional pointer to a kernel internal completion signalled when * drm_send_event() is called, useful to internally synchronize with * nonblocking operations. */ struct completion *completion; /** * @completion_release: * * Optional callback currently only used by the atomic modeset helpers * to clean up the reference count for the structure @completion is * stored in. */ void (*completion_release)(struct completion *completion); /** * @event: * * Pointer to the actual event that should be sent to userspace to be * read using drm_read(). Can be optional, since nowadays events are * also used to signal kernel internal threads with @completion or DMA * transactions using @fence. */ struct drm_event *event; /** * @fence: * * Optional DMA fence to unblock other hardware transactions which * depend upon the nonblocking DRM operation this event represents. */ struct dma_fence *fence; /** * @file_priv: * * &struct drm_file where @event should be delivered to. Only set when * @event is set. */ struct drm_file *file_priv; /** * @link: * * Double-linked list to keep track of this event. Can be used by the * driver up to the point when it calls drm_send_event(), after that * this list entry is owned by the core for its own book-keeping. */ struct list_head link; /** * @pending_link: * * Entry on &drm_file.pending_event_list, to keep track of all pending * events for @file_priv, to allow correct unwinding of them when * userspace closes the file before the event is delivered. */ struct list_head pending_link; }; /** * struct drm_file - DRM file private data * * This structure tracks DRM state per open file descriptor. */ struct drm_file { /** * @authenticated: * * Whether the client is allowed to submit rendering, which for legacy * nodes means it must be authenticated.
* * See also the :ref:`section on primary nodes and authentication * <drm_primary_node>`. */ bool authenticated; /** * @stereo_allowed: * * True when the client has asked us to expose stereo 3D mode flags. */ bool stereo_allowed; /** * @universal_planes: * * True if the client understands CRTC primary planes and cursor planes * in the plane list. Automatically set when @atomic is set. */ bool universal_planes; /** @atomic: True if the client understands atomic properties. */ bool atomic; /** * @aspect_ratio_allowed: * * True if the client can handle picture aspect ratios and has requested * that this information be passed along with the mode. */ bool aspect_ratio_allowed; /** * @writeback_connectors: * * True if the client understands writeback connectors. */ bool writeback_connectors; /** * @was_master: * * This client has, or had, master capability. Protected by struct * &drm_device.master_mutex. * * This is used to ensure that CAP_SYS_ADMIN is not enforced if the * client is or was master in the past. */ bool was_master; /** * @is_master: * * This client is the creator of @master. Protected by struct * &drm_device.master_mutex. * * See also the :ref:`section on primary nodes and authentication * <drm_primary_node>`. */ bool is_master; /** * @supports_virtualized_cursor_plane: * * This client is capable of handling the cursor plane with the * restrictions imposed on it by the virtualized drivers. * * This implies that the cursor plane has to behave like a cursor, * i.e. track cursor movement. It also requires setting of the * hotspot properties by the client on the cursor plane. */ bool supports_virtualized_cursor_plane; /** * @master: * * Master this node is currently associated with. Protected by struct * &drm_device.master_mutex, and serialized by @master_lookup_lock. * * Only relevant if drm_is_primary_client() returns true. Note that * this only matches &drm_device.master if the master is the currently * active one. * * To update @master, both &drm_device.master_mutex and * @master_lookup_lock need to be held; therefore holding either of * them is safe and sufficient for the read side. * * When dereferencing this pointer, either hold struct * &drm_device.master_mutex for the duration of the pointer's use, or * use drm_file_get_master() if struct &drm_device.master_mutex is not * currently held and there is no other need to hold it. This prevents * @master from being freed during use. * * See also @authenticated and @is_master and the :ref:`section on * primary nodes and authentication <drm_primary_node>`. */ struct drm_master *master; /** @master_lookup_lock: Serializes @master. */ spinlock_t master_lookup_lock; /** * @pid: Process that is using this file. * * Must only be dereferenced under an rcu_read_lock or equivalent. * * Updates are guarded with dev->filelist_mutex and the reference must be * dropped after an RCU grace period to accommodate lockless readers. */ struct pid __rcu *pid; /** @client_id: A unique id for fdinfo */ u64 client_id; /** @magic: Authentication magic, see @authenticated. */ drm_magic_t magic; /** * @lhead: * * List of all open files of a DRM device, linked into * &drm_device.filelist. Protected by &drm_device.filelist_mutex. */ struct list_head lhead; /** @minor: &struct drm_minor for this file. */ struct drm_minor *minor; /** * @object_idr: * * Mapping of mm object handles to object pointers. Used by the GEM * subsystem. Protected by @table_lock. * * Note that allocated entries might be NULL as a transient state when * creating or deleting a handle.
*/ struct idr object_idr; /** @table_lock: Protects @object_idr. */ spinlock_t table_lock; /** @syncobj_idr: Mapping of sync object handles to object pointers. */ struct idr syncobj_idr; /** @syncobj_table_lock: Protects @syncobj_idr. */ spinlock_t syncobj_table_lock; /** @filp: Pointer to the core file structure. */ struct file *filp; /** * @driver_priv: * * Optional pointer for driver private data. Can be allocated in * &drm_driver.open and should be freed in &drm_driver.postclose. */ void *driver_priv; /** * @fbs: * * List of &struct drm_framebuffer associated with this file, using the * &drm_framebuffer.filp_head entry. * * Protected by @fbs_lock. Note that the @fbs list holds a reference on * the framebuffer object to prevent it from untimely disappearing. */ struct list_head fbs; /** @fbs_lock: Protects @fbs. */ struct mutex fbs_lock; /** * @blobs: * * User-created blob properties; this retains a reference on the * property. * * Protected by &drm_mode_config.blob_lock. */ struct list_head blobs; /** @event_wait: Waitqueue for new events added to @event_list. */ wait_queue_head_t event_wait; /** * @pending_event_list: * * List of pending &struct drm_pending_event, used to clean up pending * events in case this file gets closed before the event is signalled. * Uses the &drm_pending_event.pending_link entry. * * Protected by &drm_device.event_lock. */ struct list_head pending_event_list; /** * @event_list: * * List of &struct drm_pending_event, ready for delivery to userspace * through drm_read(). Uses the &drm_pending_event.link entry. * * Protected by &drm_device.event_lock. */ struct list_head event_list; /** * @event_space: * * Available event space to prevent userspace from * exhausting kernel memory. Currently limited to the fairly arbitrary * value of 4KB. */ int event_space; /** @event_read_lock: Serializes drm_read(). */ struct mutex event_read_lock; /** * @prime: * * Per-file buffer caches used by the PRIME buffer sharing code. */ struct drm_prime_file_private prime; /** * @client_name: * * Userspace-provided name; useful for accounting and debugging. */ const char *client_name; /** * @client_name_lock: Protects @client_name. */ struct mutex client_name_lock; /** * @debugfs_client: * * debugfs directory for each client under a drm node. */ struct dentry *debugfs_client; }; /** * drm_is_primary_client - is this an open file of the primary node * @file_priv: DRM file * * Returns true if this is an open file of the primary node, i.e. * &drm_file.minor of @file_priv is a primary minor. * * See also the :ref:`section on primary nodes and authentication * <drm_primary_node>`. */ static inline bool drm_is_primary_client(const struct drm_file *file_priv) { return file_priv->minor->type == DRM_MINOR_PRIMARY; } /** * drm_is_render_client - is this an open file of the render node * @file_priv: DRM file * * Returns true if this is an open file of the render node, i.e. * &drm_file.minor of @file_priv is a render minor. * * See also the :ref:`section on render nodes <drm_render_node>`. */ static inline bool drm_is_render_client(const struct drm_file *file_priv) { return file_priv->minor->type == DRM_MINOR_RENDER; } /** * drm_is_accel_client - is this an open file of the compute acceleration node * @file_priv: DRM file * * Returns true if this is an open file of the compute acceleration node, i.e. * &drm_file.minor of @file_priv is an accel minor. * * See also :doc:`Introduction to compute accelerators subsystem * </accel/introduction>`.
*/ static inline bool drm_is_accel_client(const struct drm_file *file_priv) { return file_priv->minor->type == DRM_MINOR_ACCEL; } __printf(2, 3) void drm_file_err(struct drm_file *file_priv, const char *fmt, ...); void drm_file_update_pid(struct drm_file *); struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id); void drm_minor_release(struct drm_minor *minor); int drm_open(struct inode *inode, struct file *filp); int drm_open_helper(struct file *filp, struct drm_minor *minor); ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset); int drm_release(struct inode *inode, struct file *filp); int drm_release_noglobal(struct inode *inode, struct file *filp); __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait); int drm_event_reserve_init_locked(struct drm_device *dev, struct drm_file *file_priv, struct drm_pending_event *p, struct drm_event *e); int drm_event_reserve_init(struct drm_device *dev, struct drm_file *file_priv, struct drm_pending_event *p, struct drm_event *e); void drm_event_cancel_free(struct drm_device *dev, struct drm_pending_event *p); void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e); void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); void drm_send_event_timestamp_locked(struct drm_device *dev, struct drm_pending_event *e, ktime_t timestamp); /** * struct drm_memory_stats - GEM object stats associated with a DRM file * @shared: Total size of GEM objects shared between processes * @private: Total size of GEM objects * @resident: Total size of GEM objects backing pages * @purgeable: Total size of GEM objects that can be purged (resident and not active) * @active: Total size of GEM objects active on one or more engines * * Used by drm_print_memory_stats() */ struct drm_memory_stats { u64 shared; u64 private; u64 resident; u64 purgeable; u64 active; }; enum drm_gem_object_status; int drm_memory_stats_is_zero(const struct drm_memory_stats *stats); void drm_fdinfo_print_size(struct drm_printer *p, const char *prefix, const char *stat, const char *region, u64 sz); void drm_print_memory_stats(struct drm_printer *p, const struct drm_memory_stats *stats, enum drm_gem_object_status supported_status, const char *region); void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file); void drm_show_fdinfo(struct seq_file *m, struct file *f); struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); #endif /* _DRM_FILE_H_ */
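/*
 * Illustrative sketch, not part of the header above: the typical lifecycle
 * of a &struct drm_pending_event, as documented there, is reserve ->
 * nonblocking work -> send, with drm_event_cancel_free() on failure paths.
 * example_queue_event() is a hypothetical driver helper using only the
 * functions declared in this header:
 */
static int example_queue_event(struct drm_device *dev,
			       struct drm_file *file_priv,
			       struct drm_pending_event *p,
			       struct drm_event *e)
{
	int ret;

	/* Reserve per-file event space and bind e to p. */
	ret = drm_event_reserve_init(dev, file_priv, p, e);
	if (ret)
		return ret;

	/* ... start the nonblocking operation here; if it cannot be
	 * started, call drm_event_cancel_free(dev, p) instead of sending ...
	 */

	/* Hand p to the core; userspace then reads e via drm_read(). */
	drm_send_event(dev, p);
	return 0;
}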
// SPDX-License-Identifier: GPL-2.0 /* * U2F Zero LED and RNG driver * * Copyright 2018 Andrej Shadura <andrew@shadura.me> * Loosely based on drivers/hid/hid-led.c * and drivers/usb/misc/chaoskey.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2.
*/ #include <linux/hid.h> #include <linux/hidraw.h> #include <linux/hw_random.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/usb.h> #include "usbhid/usbhid.h" #include "hid-ids.h" #define DRIVER_SHORT "u2fzero" #define HID_REPORT_SIZE 64 enum hw_revision { HW_U2FZERO, HW_NITROKEY_U2F, }; struct hw_revision_config { u8 rng_cmd; u8 wink_cmd; const char *name; }; static const struct hw_revision_config hw_configs[] = { [HW_U2FZERO] = { .rng_cmd = 0x21, .wink_cmd = 0x24, .name = "U2F Zero", }, [HW_NITROKEY_U2F] = { .rng_cmd = 0xc0, .wink_cmd = 0xc2, .name = "NitroKey U2F", }, }; /* We only use broadcast (CID-less) messages */ #define CID_BROADCAST 0xffffffff struct u2f_hid_msg { u32 cid; union { struct { u8 cmd; u8 bcnth; u8 bcntl; u8 data[HID_REPORT_SIZE - 7]; } init; struct { u8 seq; u8 data[HID_REPORT_SIZE - 5]; } cont; }; } __packed; struct u2f_hid_report { u8 report_type; struct u2f_hid_msg msg; } __packed; #define U2F_HID_MSG_LEN(f) (size_t)(((f).init.bcnth << 8) + (f).init.bcntl) struct u2fzero_device { struct hid_device *hdev; struct urb *urb; /* URB for the RNG data */ struct led_classdev ldev; /* Embedded struct for led */ struct hwrng hwrng; /* Embedded struct for hwrng */ char *led_name; char *rng_name; u8 *buf_out; u8 *buf_in; struct mutex lock; bool present; kernel_ulong_t hw_revision; }; static int u2fzero_send(struct u2fzero_device *dev, struct u2f_hid_report *req) { int ret; mutex_lock(&dev->lock); memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report)); ret = hid_hw_output_report(dev->hdev, dev->buf_out, sizeof(struct u2f_hid_msg)); mutex_unlock(&dev->lock); if (ret < 0) return ret; return ret == sizeof(struct u2f_hid_msg) ? 0 : -EMSGSIZE; } struct u2fzero_transfer_context { struct completion done; int status; }; static void u2fzero_read_callback(struct urb *urb) { struct u2fzero_transfer_context *ctx = urb->context; ctx->status = urb->status; complete(&ctx->done); } static int u2fzero_recv(struct u2fzero_device *dev, struct u2f_hid_report *req, struct u2f_hid_msg *resp) { int ret; struct hid_device *hdev = dev->hdev; struct u2fzero_transfer_context ctx; mutex_lock(&dev->lock); memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report)); dev->urb->context = &ctx; init_completion(&ctx.done); ret = usb_submit_urb(dev->urb, GFP_NOIO); if (unlikely(ret)) { hid_err(hdev, "usb_submit_urb failed: %d", ret); goto err; } ret = hid_hw_output_report(dev->hdev, dev->buf_out, sizeof(struct u2f_hid_msg)); if (ret < 0) { hid_err(hdev, "hid_hw_output_report failed: %d", ret); goto err; } ret = (wait_for_completion_timeout( &ctx.done, msecs_to_jiffies(USB_CTRL_SET_TIMEOUT))); if (ret == 0) { usb_kill_urb(dev->urb); hid_err(hdev, "urb submission timed out"); } else { ret = dev->urb->actual_length; memcpy(resp, dev->buf_in, ret); } err: mutex_unlock(&dev->lock); return ret; } static int u2fzero_blink(struct led_classdev *ldev) { struct u2fzero_device *dev = container_of(ldev, struct u2fzero_device, ldev); struct u2f_hid_report req = { .report_type = 0, .msg.cid = CID_BROADCAST, .msg.init = { .cmd = hw_configs[dev->hw_revision].wink_cmd, .bcnth = 0, .bcntl = 0, .data = {0}, } }; return u2fzero_send(dev, &req); } static int u2fzero_brightness_set(struct led_classdev *ldev, enum led_brightness brightness) { ldev->brightness = LED_OFF; if (brightness) return u2fzero_blink(ldev); else return 0; } static int u2fzero_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct u2fzero_device *dev = container_of(rng, struct u2fzero_device, hwrng); 
struct u2f_hid_report req = { .report_type = 0, .msg.cid = CID_BROADCAST, .msg.init = { .cmd = hw_configs[dev->hw_revision].rng_cmd, .bcnth = 0, .bcntl = 0, .data = {0}, } }; struct u2f_hid_msg resp; int ret; size_t actual_length; /* valid packets must have a correct header */ int min_length = offsetof(struct u2f_hid_msg, init.data); if (!dev->present) { hid_dbg(dev->hdev, "device not present"); return 0; } ret = u2fzero_recv(dev, &req, &resp); /* ignore errors or packets without data */ if (ret < min_length) return 0; /* only take the minimum amount of data it is safe to take */ actual_length = min3((size_t)ret - min_length, U2F_HID_MSG_LEN(resp), max); memcpy(data, resp.init.data, actual_length); return actual_length; } static int u2fzero_init_led(struct u2fzero_device *dev, unsigned int minor) { dev->led_name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL, "%s%u", DRIVER_SHORT, minor); if (dev->led_name == NULL) return -ENOMEM; dev->ldev.name = dev->led_name; dev->ldev.max_brightness = LED_ON; dev->ldev.flags = LED_HW_PLUGGABLE; dev->ldev.brightness_set_blocking = u2fzero_brightness_set; return devm_led_classdev_register(&dev->hdev->dev, &dev->ldev); } static int u2fzero_init_hwrng(struct u2fzero_device *dev, unsigned int minor) { dev->rng_name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL, "%s-rng%u", DRIVER_SHORT, minor); if (dev->rng_name == NULL) return -ENOMEM; dev->hwrng.name = dev->rng_name; dev->hwrng.read = u2fzero_rng_read; return devm_hwrng_register(&dev->hdev->dev, &dev->hwrng); } static int u2fzero_fill_in_urb(struct u2fzero_device *dev) { struct hid_device *hdev = dev->hdev; struct usb_device *udev; struct usbhid_device *usbhid = hdev->driver_data; unsigned int pipe_in; struct usb_host_endpoint *ep; if (dev->hdev->bus != BUS_USB) return -EINVAL; udev = hid_to_usb_dev(hdev); if (!usbhid->urbout || !usbhid->urbin) return -ENODEV; ep = usb_pipe_endpoint(udev, usbhid->urbin->pipe); if (!ep) return -ENODEV; dev->urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb) return -ENOMEM; pipe_in = (usbhid->urbin->pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(dev->urb, udev, pipe_in, dev->buf_in, HID_REPORT_SIZE, u2fzero_read_callback, NULL, ep->desc.bInterval); return 0; } static int u2fzero_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct u2fzero_device *dev; unsigned int minor; int ret; if (!hid_is_usb(hdev)) return -EINVAL; dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); if (dev == NULL) return -ENOMEM; dev->hw_revision = id->driver_data; dev->buf_out = devm_kmalloc(&hdev->dev, sizeof(struct u2f_hid_report), GFP_KERNEL); if (dev->buf_out == NULL) return -ENOMEM; dev->buf_in = devm_kmalloc(&hdev->dev, sizeof(struct u2f_hid_msg), GFP_KERNEL); if (dev->buf_in == NULL) return -ENOMEM; ret = hid_parse(hdev); if (ret) return ret; dev->hdev = hdev; hid_set_drvdata(hdev, dev); mutex_init(&dev->lock); ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) return ret; u2fzero_fill_in_urb(dev); dev->present = true; minor = ((struct hidraw *) hdev->hidraw)->minor; ret = u2fzero_init_led(dev, minor); if (ret) { hid_hw_stop(hdev); return ret; } hid_info(hdev, "%s LED initialised\n", hw_configs[dev->hw_revision].name); ret = u2fzero_init_hwrng(dev, minor); if (ret) { hid_hw_stop(hdev); return ret; } hid_info(hdev, "%s RNG initialised\n", hw_configs[dev->hw_revision].name); return 0; } static void u2fzero_remove(struct hid_device *hdev) { struct u2fzero_device *dev = hid_get_drvdata(hdev); mutex_lock(&dev->lock); dev->present = false; 
mutex_unlock(&dev->lock); hid_hw_stop(hdev); usb_poison_urb(dev->urb); usb_free_urb(dev->urb); } static const struct hid_device_id u2fzero_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_U2F_ZERO), .driver_data = HW_U2FZERO }, { HID_USB_DEVICE(USB_VENDOR_ID_CLAY_LOGIC, USB_DEVICE_ID_NITROKEY_U2F), .driver_data = HW_NITROKEY_U2F }, { } }; MODULE_DEVICE_TABLE(hid, u2fzero_table); static struct hid_driver u2fzero_driver = { .name = "hid-" DRIVER_SHORT, .probe = u2fzero_probe, .remove = u2fzero_remove, .id_table = u2fzero_table, }; module_hid_driver(u2fzero_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andrej Shadura <andrew@shadura.me>"); MODULE_DESCRIPTION("U2F Zero LED and RNG driver");
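/*
 * Illustrative note, not part of the driver above: U2FHID init packets
 * carry the payload length big-endian in the bcnth/bcntl fields, which is
 * what the U2F_HID_MSG_LEN() macro above reassembles.
 * example_u2f_payload_len() is a hypothetical helper showing the arithmetic:
 */
static size_t example_u2f_payload_len(void)
{
	struct u2f_hid_msg msg = { .cid = CID_BROADCAST };

	msg.init.bcnth = 0x01;	/* length high byte */
	msg.init.bcntl = 0x20;	/* length low byte */

	/* (0x01 << 8) + 0x20 = 288 bytes of payload */
	return U2F_HID_MSG_LEN(msg);
}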
// SPDX-License-Identifier: GPL-2.0-or-later /* * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <net/rtnetlink.h> #include <linux/if_team.h> struct ab_priv { struct team_port __rcu *active_port; struct team_option_inst_info *ap_opt_inst_info; }; static struct ab_priv *ab_priv(struct team *team) { return (struct ab_priv *) &team->mode_priv; } static rx_handler_result_t ab_receive(struct team *team, struct team_port *port, struct sk_buff *skb) { struct team_port *active_port; active_port = rcu_dereference(ab_priv(team)->active_port); if (active_port != port) return RX_HANDLER_EXACT; return RX_HANDLER_ANOTHER; } static bool ab_transmit(struct team *team, struct sk_buff *skb) { struct team_port *active_port; active_port = rcu_dereference_bh(ab_priv(team)->active_port); if (unlikely(!active_port)) goto drop; if (team_dev_queue_xmit(team, active_port, skb)) return false; return true; drop: dev_kfree_skb_any(skb); return false; } static void ab_port_leave(struct team *team, struct team_port *port) { if (ab_priv(team)->active_port == port) { RCU_INIT_POINTER(ab_priv(team)->active_port, NULL); team_option_inst_set_change(ab_priv(team)->ap_opt_inst_info); } } static void ab_active_port_init(struct team *team, struct team_option_inst_info *info) { ab_priv(team)->ap_opt_inst_info = info; } static void ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) { struct team_port *active_port; active_port = rtnl_dereference(ab_priv(team)->active_port); if (active_port) ctx->data.u32_val = active_port->dev->ifindex; else ctx->data.u32_val = 0; } static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx) { struct team_port *port; list_for_each_entry(port, &team->port_list, list) { if (port->dev->ifindex == ctx->data.u32_val) { rcu_assign_pointer(ab_priv(team)->active_port, port); return 0; } } return -ENOENT; } static const struct team_option ab_options[] = { { .name = "activeport", .type = TEAM_OPTION_TYPE_U32, .init = ab_active_port_init, .getter = ab_active_port_get, .setter = ab_active_port_set, }, }; static int ab_init(struct team *team) { return team_options_register(team, ab_options, ARRAY_SIZE(ab_options)); } static void ab_exit(struct team *team) { team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options)); } static const struct team_mode_ops ab_mode_ops = { .init = ab_init, .exit = ab_exit, .receive = ab_receive, .transmit = ab_transmit, .port_leave = ab_port_leave, }; static const struct team_mode ab_mode = { .kind = "activebackup", .owner = THIS_MODULE, .priv_size = sizeof(struct ab_priv), .ops = &ab_mode_ops, .lag_tx_type = NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, }; static int __init ab_init_module(void) { return team_mode_register(&ab_mode); } static void __exit ab_cleanup_module(void) { team_mode_unregister(&ab_mode); } module_init(ab_init_module);
module_exit(ab_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); MODULE_DESCRIPTION("Active-backup mode for team"); MODULE_ALIAS_TEAM_MODE("activebackup");
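/*
 * Illustrative sketch, not part of the module above: active_port is
 * published with rcu_assign_pointer() under RTNL (ab_active_port_set())
 * and read locklessly on the hot paths with rcu_dereference() and
 * rcu_dereference_bh(). example_port_is_active() is a hypothetical reader
 * following the same pattern:
 */
static bool example_port_is_active(struct team *team, struct team_port *port)
{
	bool active;

	rcu_read_lock();
	active = rcu_dereference(ab_priv(team)->active_port) == port;
	rcu_read_unlock();

	return active;
}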
// SPDX-License-Identifier: GPL-2.0 /* * media.c - Media Controller specific ALSA driver code * * Copyright (c) 2019 Shuah Khan <shuah@kernel.org> * */ /* * This file adds Media Controller support to the ALSA driver * to use the Media Controller API to share the tuner with DVB * and V4L2 drivers that control the media device. * * The media device is created based on the existing quirks framework. * Using this approach, the media controller API usage can be added for * a specific device. */ #include <linux/init.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/usb.h> #include <sound/pcm.h> #include <sound/core.h> #include "usbaudio.h" #include "card.h" #include "mixer.h" #include "media.h" int snd_media_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm, int stream) { struct media_device *mdev; struct media_ctl *mctl; struct device *pcm_dev = pcm->streams[stream].dev; u32 intf_type; int ret = 0; u16 mixer_pad; struct media_entity *entity; mdev = subs->stream->chip->media_dev; if (!mdev) return 0; if (subs->media_ctl) return 0; /* allocate media_ctl */ mctl = kzalloc(sizeof(*mctl), GFP_KERNEL); if (!mctl) return -ENOMEM; mctl->media_dev = mdev; if (stream == SNDRV_PCM_STREAM_PLAYBACK) { intf_type = MEDIA_INTF_T_ALSA_PCM_PLAYBACK; mctl->media_entity.function = MEDIA_ENT_F_AUDIO_PLAYBACK; mctl->media_pad.flags = MEDIA_PAD_FL_SOURCE; mixer_pad = 1; } else { intf_type = MEDIA_INTF_T_ALSA_PCM_CAPTURE; mctl->media_entity.function = MEDIA_ENT_F_AUDIO_CAPTURE; mctl->media_pad.flags = MEDIA_PAD_FL_SINK; mixer_pad = 2; } mctl->media_entity.name = pcm->name; media_entity_pads_init(&mctl->media_entity, 1, &mctl->media_pad); ret = media_device_register_entity(mctl->media_dev, &mctl->media_entity); if (ret) goto free_mctl; mctl->intf_devnode = media_devnode_create(mdev, intf_type, 0, MAJOR(pcm_dev->devt), MINOR(pcm_dev->devt)); if (!mctl->intf_devnode) { ret = -ENOMEM; goto unregister_entity; } mctl->intf_link = media_create_intf_link(&mctl->media_entity, &mctl->intf_devnode->intf, MEDIA_LNK_FL_ENABLED); if (!mctl->intf_link) { ret = -ENOMEM; goto devnode_remove; } /* create link between mixer and audio */ media_device_for_each_entity(entity, mdev) { switch (entity->function) { case MEDIA_ENT_F_AUDIO_MIXER: ret =
media_create_pad_link(entity, mixer_pad, &mctl->media_entity, 0, MEDIA_LNK_FL_ENABLED); if (ret) goto remove_intf_link; break; } } subs->media_ctl = mctl; return 0; remove_intf_link: media_remove_intf_link(mctl->intf_link); devnode_remove: media_devnode_remove(mctl->intf_devnode); unregister_entity: media_device_unregister_entity(&mctl->media_entity); free_mctl: kfree(mctl); return ret; } void snd_media_stream_delete(struct snd_usb_substream *subs) { struct media_ctl *mctl = subs->media_ctl; if (mctl) { struct media_device *mdev; mdev = mctl->media_dev; if (mdev && media_devnode_is_registered(mdev->devnode)) { media_devnode_remove(mctl->intf_devnode); media_device_unregister_entity(&mctl->media_entity); media_entity_cleanup(&mctl->media_entity); } kfree(mctl); subs->media_ctl = NULL; } } int snd_media_start_pipeline(struct snd_usb_substream *subs) { struct media_ctl *mctl = subs->media_ctl; int ret = 0; if (!mctl) return 0; guard(mutex)(&mctl->media_dev->graph_mutex); if (mctl->media_dev->enable_source) ret = mctl->media_dev->enable_source(&mctl->media_entity, &mctl->media_pipe); return ret; } void snd_media_stop_pipeline(struct snd_usb_substream *subs) { struct media_ctl *mctl = subs->media_ctl; if (!mctl) return; guard(mutex)(&mctl->media_dev->graph_mutex); if (mctl->media_dev->disable_source) mctl->media_dev->disable_source(&mctl->media_entity); } static int snd_media_mixer_init(struct snd_usb_audio *chip) { struct device *ctl_dev = chip->card->ctl_dev; struct media_intf_devnode *ctl_intf; struct usb_mixer_interface *mixer; struct media_device *mdev = chip->media_dev; struct media_mixer_ctl *mctl; u32 intf_type = MEDIA_INTF_T_ALSA_CONTROL; int ret; if (!mdev) return -ENODEV; ctl_intf = chip->ctl_intf_media_devnode; if (!ctl_intf) { ctl_intf = media_devnode_create(mdev, intf_type, 0, MAJOR(ctl_dev->devt), MINOR(ctl_dev->devt)); if (!ctl_intf) return -ENOMEM; chip->ctl_intf_media_devnode = ctl_intf; } list_for_each_entry(mixer, &chip->mixer_list, list) { if (mixer->media_mixer_ctl) continue; /* allocate media_mixer_ctl */ mctl = kzalloc(sizeof(*mctl), GFP_KERNEL); if (!mctl) return -ENOMEM; mctl->media_dev = mdev; mctl->media_entity.function = MEDIA_ENT_F_AUDIO_MIXER; mctl->media_entity.name = chip->card->mixername; mctl->media_pad[0].flags = MEDIA_PAD_FL_SINK; mctl->media_pad[1].flags = MEDIA_PAD_FL_SOURCE; mctl->media_pad[2].flags = MEDIA_PAD_FL_SOURCE; media_entity_pads_init(&mctl->media_entity, MEDIA_MIXER_PAD_MAX, mctl->media_pad); ret = media_device_register_entity(mctl->media_dev, &mctl->media_entity); if (ret) { kfree(mctl); return ret; } mctl->intf_link = media_create_intf_link(&mctl->media_entity, &ctl_intf->intf, MEDIA_LNK_FL_ENABLED); if (!mctl->intf_link) { media_device_unregister_entity(&mctl->media_entity); media_entity_cleanup(&mctl->media_entity); kfree(mctl); return -ENOMEM; } mctl->intf_devnode = ctl_intf; mixer->media_mixer_ctl = mctl; } return 0; } static void snd_media_mixer_delete(struct snd_usb_audio *chip) { struct usb_mixer_interface *mixer; struct media_device *mdev = chip->media_dev; if (!mdev) return; list_for_each_entry(mixer, &chip->mixer_list, list) { struct media_mixer_ctl *mctl; mctl = mixer->media_mixer_ctl; if (!mixer->media_mixer_ctl) continue; if (media_devnode_is_registered(mdev->devnode)) { media_device_unregister_entity(&mctl->media_entity); media_entity_cleanup(&mctl->media_entity); } kfree(mctl); mixer->media_mixer_ctl = NULL; } if (media_devnode_is_registered(mdev->devnode)) media_devnode_remove(chip->ctl_intf_media_devnode); 
chip->ctl_intf_media_devnode = NULL; } int snd_media_device_create(struct snd_usb_audio *chip, struct usb_interface *iface) { struct media_device *mdev; struct usb_device *usbdev = interface_to_usbdev(iface); int ret = 0; /* usb-audio driver is probed for each usb interface, and * there are multiple interfaces per device. Avoid calling * media_device_usb_allocate() each time usb_audio_probe() * is called. Do it only once. */ if (chip->media_dev) { mdev = chip->media_dev; goto snd_mixer_init; } mdev = media_device_usb_allocate(usbdev, KBUILD_MODNAME, THIS_MODULE); if (IS_ERR(mdev)) return -ENOMEM; /* save media device - avoid lookups */ chip->media_dev = mdev; snd_mixer_init: /* Create media entities for mixer and control dev */ ret = snd_media_mixer_init(chip); /* media_device might be registered, print error and continue */ if (ret) dev_err(&usbdev->dev, "Couldn't create media mixer entities. Error: %d\n", ret); if (!media_devnode_is_registered(mdev->devnode)) { /* don't register if snd_media_mixer_init() failed */ if (ret) goto create_fail; /* register media_device */ ret = media_device_register(mdev); create_fail: if (ret) { snd_media_mixer_delete(chip); media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE); /* clear saved media_dev */ chip->media_dev = NULL; dev_err(&usbdev->dev, "Couldn't register media device. Error: %d\n", ret); return ret; } } return ret; } void snd_media_device_delete(struct snd_usb_audio *chip) { struct media_device *mdev = chip->media_dev; struct snd_usb_stream *stream; /* release resources */ list_for_each_entry(stream, &chip->pcm_list, list) { snd_media_stream_delete(&stream->substream[0]); snd_media_stream_delete(&stream->substream[1]); } snd_media_mixer_delete(chip); if (mdev) { media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE); chip->media_dev = NULL; } }
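/*
 * Illustrative sketch, not part of the file above: in the media graph built
 * here the mixer entity uses pad 0 as its sink and pads 1/2 as sources
 * (see snd_media_mixer_init()), so the per-stream link created in
 * snd_media_stream_init() reduces to the call below.
 * example_link_mixer_to_pcm() is a hypothetical helper:
 */
static int example_link_mixer_to_pcm(struct media_entity *mixer,
				     struct media_entity *pcm, int stream)
{
	/* mixer pad 1 is linked for playback streams, pad 2 for capture */
	u16 mixer_pad = (stream == SNDRV_PCM_STREAM_PLAYBACK) ? 1 : 2;

	return media_create_pad_link(mixer, mixer_pad, pcm, 0,
				     MEDIA_LNK_FL_ENABLED);
}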
// SPDX-License-Identifier: GPL-2.0-or-later /* * IPVS: Least-Connection Scheduling module * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * Changes: * Wensong Zhang : added the ip_vs_lc_update_svc * Wensong Zhang : added any dest with weight=0 is quiesced */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <net/ip_vs.h> /* * Least Connection scheduling */ static struct ip_vs_dest * ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest, *least = NULL; unsigned int loh = 0, doh; IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); /* * Simply select the server with the least number of * (activeconns<<5) + inactconns, * except those whose weight is equal to zero. * If the weight is equal to zero, it means that the server is * quiesced: the existing connections to the server still get * served, but no new connection is assigned to the server. */ list_for_each_entry_rcu(dest, &svc->destinations, n_list) { if ((dest->flags & IP_VS_DEST_F_OVERLOAD) || atomic_read(&dest->weight) == 0) continue; doh = ip_vs_dest_conn_overhead(dest); if (!least || doh < loh) { least = dest; loh = doh; } } if (!least) ip_vs_scheduler_err(svc, "no destination available"); else IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d " "inactconns %d\n", IP_VS_DBG_ADDR(least->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), atomic_read(&least->inactconns)); return least; } static struct ip_vs_scheduler ip_vs_lc_scheduler = { .name = "lc", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), .schedule = ip_vs_lc_schedule, }; static int __init ip_vs_lc_init(void) { return register_ip_vs_scheduler(&ip_vs_lc_scheduler); } static void __exit ip_vs_lc_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_lc_scheduler); synchronize_rcu(); } module_init(ip_vs_lc_init); module_exit(ip_vs_lc_cleanup); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ipvs least connection scheduler");
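/*
 * Illustrative note, not part of the module above: the "overhead" that
 * ip_vs_dest_conn_overhead() feeds into the comparison is, per the comment
 * in ip_vs_lc_schedule(), (activeconns << 5) + inactconns, i.e. one active
 * connection weighs as much as 32 inactive ones. example_lc_overhead() is
 * a hypothetical standalone version of that formula:
 */
static inline unsigned int example_lc_overhead(unsigned int activeconns,
					       unsigned int inactconns)
{
	/* e.g. 3 active + 10 inactive -> 3 * 32 + 10 = 106 */
	return (activeconns << 5) + inactconns;
}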
//
SPDX-License-Identifier: GPL-2.0-or-later /* * Sunplus spca561 subdriver * * Copyright (C) 2004 Michel Xhaard mxhaard@magic.fr * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "spca561" #include <linux/input.h> #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("GSPCA/SPCA561 USB Camera Driver"); MODULE_LICENSE("GPL"); #define EXPOSURE_MAX (2047 + 325) /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct { /* hue/contrast control cluster */ struct v4l2_ctrl *contrast; struct v4l2_ctrl *hue; }; struct v4l2_ctrl *autogain; #define EXPO12A_DEF 3 __u8 expo12a; /* expo/gain? for rev 12a */ __u8 chip_revision; #define Rev012A 0 #define Rev072A 1 signed char ag_cnt; #define AG_CNT_START 13 }; static const struct v4l2_pix_format sif_012a_mode[] = { {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3}, {176, 144, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2}, {320, 240, V4L2_PIX_FMT_SPCA561, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_SPCA561, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 4 / 8, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; static const struct v4l2_pix_format sif_072a_mode[] = { {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3}, {176, 144, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2}, {320, 240, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; /* * Initialization data * I'm not very sure how to split initialization from open data * chunks. 
For now, we'll consider everything as initialization */ /* Frame packet header offsets for the spca561 */ #define SPCA561_OFFSET_SNAP 1 #define SPCA561_OFFSET_TYPE 2 #define SPCA561_OFFSET_COMPRESS 3 #define SPCA561_OFFSET_FRAMSEQ 4 #define SPCA561_OFFSET_GPIO 5 #define SPCA561_OFFSET_USBBUFF 6 #define SPCA561_OFFSET_WIN2GRAVE 7 #define SPCA561_OFFSET_WIN2RAVE 8 #define SPCA561_OFFSET_WIN2BAVE 9 #define SPCA561_OFFSET_WIN2GBAVE 10 #define SPCA561_OFFSET_WIN1GRAVE 11 #define SPCA561_OFFSET_WIN1RAVE 12 #define SPCA561_OFFSET_WIN1BAVE 13 #define SPCA561_OFFSET_WIN1GBAVE 14 #define SPCA561_OFFSET_FREQ 15 #define SPCA561_OFFSET_VSYNC 16 #define SPCA561_INDEX_I2C_BASE 0x8800 #define SPCA561_SNAPBIT 0x20 #define SPCA561_SNAPCTRL 0x40 static const u16 rev72a_reset[][2] = { {0x0000, 0x8114}, /* Software GPIO output data */ {0x0001, 0x8114}, /* Software GPIO output data */ {0x0000, 0x8112}, /* Some kind of reset */ {} }; static const __u16 rev72a_init_data1[][2] = { {0x0003, 0x8701}, /* PCLK clock delay adjustment */ {0x0001, 0x8703}, /* HSYNC from cmos inverted */ {0x0011, 0x8118}, /* Enable and conf sensor */ {0x0001, 0x8118}, /* Conf sensor */ {0x0092, 0x8804}, /* I know nothing about these */ {0x0010, 0x8802}, /* 0x88xx registers, so I won't */ {} }; static const u16 rev72a_init_sensor1[][2] = { {0x0001, 0x000d}, {0x0002, 0x0018}, {0x0004, 0x0165}, {0x0005, 0x0021}, {0x0007, 0x00aa}, {0x0020, 0x1504}, {0x0039, 0x0002}, {0x0035, 0x0010}, {0x0009, 0x1049}, {0x0028, 0x000b}, {0x003b, 0x000f}, {0x003c, 0x0000}, {} }; static const __u16 rev72a_init_data2[][2] = { {0x0018, 0x8601}, /* Pixel/line selection for color separation */ {0x0000, 0x8602}, /* Optical black level for user setting */ {0x0060, 0x8604}, /* Optical black horizontal offset */ {0x0002, 0x8605}, /* Optical black vertical offset */ {0x0000, 0x8603}, /* Non-automatic optical black level */ {0x0002, 0x865b}, /* Horizontal offset for valid pixels */ {0x0000, 0x865f}, /* Vertical valid pixels window (x2) */ {0x00b0, 0x865d}, /* Horizontal valid pixels window (x2) */ {0x0090, 0x865e}, /* Vertical valid lines window (x2) */ {0x00e0, 0x8406}, /* Memory buffer threshold */ {0x0000, 0x8660}, /* Compensation memory stuff */ {0x0002, 0x8201}, /* Output address for r/w serial EEPROM */ {0x0008, 0x8200}, /* Clear valid bit for serial EEPROM */ {0x0001, 0x8200}, /* OprMode to be executed by hardware */ /* from ms-win */ {0x0000, 0x8611}, /* R offset for white balance */ {0x00fd, 0x8612}, /* Gr offset for white balance */ {0x0003, 0x8613}, /* B offset for white balance */ {0x0000, 0x8614}, /* Gb offset for white balance */ /* from ms-win */ {0x0035, 0x8651}, /* R gain for white balance */ {0x0040, 0x8652}, /* Gr gain for white balance */ {0x005f, 0x8653}, /* B gain for white balance */ {0x0040, 0x8654}, /* Gb gain for white balance */ {0x0002, 0x8502}, /* Maximum average bit rate stuff */ {0x0011, 0x8802}, {0x0087, 0x8700}, /* Set master clock (96Mhz????) 
*/ {0x0081, 0x8702}, /* Master clock output enable */ {0x0000, 0x8500}, /* Set image type (352x288 no compression) */ /* Originally was 0x0010 (352x288 compression) */ {0x0002, 0x865b}, /* Horizontal offset for valid pixels */ {0x0003, 0x865c}, /* Vertical offset for valid lines */ {} }; static const u16 rev72a_init_sensor2[][2] = { {0x0003, 0x0121}, {0x0004, 0x0165}, {0x0005, 0x002f}, /* blanking control column */ {0x0006, 0x0000}, /* blanking mode row*/ {0x000a, 0x0002}, {0x0009, 0x1061}, /* setexposure times && pixel clock * 0001 0 | 000 0110 0001 */ {0x0035, 0x0014}, {} }; /******************** QC Express etch2 stuff ********************/ static const __u16 Pb100_1map8300[][2] = { /* reg, value */ {0x8320, 0x3304}, {0x8303, 0x0125}, /* image area */ {0x8304, 0x0169}, {0x8328, 0x000b}, {0x833c, 0x0001}, /*fixme: win:07*/ {0x832f, 0x1904}, /*fixme: was 0419*/ {0x8307, 0x00aa}, {0x8301, 0x0003}, {0x8302, 0x000e}, {} }; static const __u16 Pb100_2map8300[][2] = { /* reg, value */ {0x8339, 0x0000}, {0x8307, 0x00aa}, {} }; static const __u16 spca561_161rev12A_data1[][2] = { {0x29, 0x8118}, /* Control register (various enable bits) */ {0x08, 0x8114}, /* GPIO: Led off */ {0x0e, 0x8112}, /* 0x0e stream off 0x3e stream on */ {0x00, 0x8102}, /* white balance - new */ {0x92, 0x8804}, {0x04, 0x8802}, /* windows uses 08 */ {} }; static const __u16 spca561_161rev12A_data2[][2] = { {0x21, 0x8118}, {0x10, 0x8500}, {0x07, 0x8601}, {0x07, 0x8602}, {0x04, 0x8501}, {0x07, 0x8201}, /* windows uses 02 */ {0x08, 0x8200}, {0x01, 0x8200}, {0x90, 0x8604}, {0x00, 0x8605}, {0xb0, 0x8603}, /* sensor gains */ {0x07, 0x8601}, /* white balance - new */ {0x07, 0x8602}, /* white balance - new */ {0x00, 0x8610}, /* *red */ {0x00, 0x8611}, /* 3f *green */ {0x00, 0x8612}, /* green *blue */ {0x00, 0x8613}, /* blue *green */ {0x43, 0x8614}, /* green *red - white balance - was 0x35 */ {0x40, 0x8615}, /* 40 *green - white balance - was 0x35 */ {0x71, 0x8616}, /* 7a *blue - white balance - was 0x35 */ {0x40, 0x8617}, /* 40 *green - white balance - was 0x35 */ {0x0c, 0x8620}, /* 0c */ {0xc8, 0x8631}, /* c8 */ {0xc8, 0x8634}, /* c8 */ {0x23, 0x8635}, /* 23 */ {0x1f, 0x8636}, /* 1f */ {0xdd, 0x8637}, /* dd */ {0xe1, 0x8638}, /* e1 */ {0x1d, 0x8639}, /* 1d */ {0x21, 0x863a}, /* 21 */ {0xe3, 0x863b}, /* e3 */ {0xdf, 0x863c}, /* df */ {0xf0, 0x8505}, {0x32, 0x850a}, /* {0x99, 0x8700}, * - white balance - new (removed) */ /* HDG we used to do this in stop0, making the init state and the state after a start / stop different, so do this here instead. 
 */
	{0x29, 0x8118},
	{}
};

static void reg_w_val(struct gspca_dev *gspca_dev, __u16 index, __u8 value)
{
	int ret;
	struct usb_device *dev = gspca_dev->dev;

	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			      0,		/* request */
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, NULL, 0, 500);
	gspca_dbg(gspca_dev, D_USBO, "reg write: 0x%02x:0x%02x\n",
		  index, value);
	if (ret < 0)
		pr_err("reg write: error %d\n", ret);
}

static void write_vector(struct gspca_dev *gspca_dev,
			 const __u16 data[][2])
{
	int i;

	i = 0;
	while (data[i][1] != 0) {
		reg_w_val(gspca_dev, data[i][1], data[i][0]);
		i++;
	}
}

/* read 'len' bytes to gspca_dev->usb_buf */
static void reg_r(struct gspca_dev *gspca_dev,
		  __u16 index, __u16 length)
{
	usb_control_msg(gspca_dev->dev,
			usb_rcvctrlpipe(gspca_dev->dev, 0),
			0,		/* request */
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0,		/* value */
			index, gspca_dev->usb_buf, length, 500);
}

/* write 'len' bytes from gspca_dev->usb_buf */
static void reg_w_buf(struct gspca_dev *gspca_dev,
		      __u16 index, __u16 len)
{
	usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0,		/* request */
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0,		/* value */
			index, gspca_dev->usb_buf, len, 500);
}

static void i2c_write(struct gspca_dev *gspca_dev, __u16 value, __u16 reg)
{
	int retry = 60;

	reg_w_val(gspca_dev, 0x8801, reg);
	reg_w_val(gspca_dev, 0x8805, value);
	reg_w_val(gspca_dev, 0x8800, value >> 8);
	do {
		reg_r(gspca_dev, 0x8803, 1);
		if (!gspca_dev->usb_buf[0])
			return;
		msleep(10);
	} while (--retry);
}

static int i2c_read(struct gspca_dev *gspca_dev, __u16 reg, __u8 mode)
{
	int retry = 60;
	__u8 value;

	reg_w_val(gspca_dev, 0x8804, 0x92);
	reg_w_val(gspca_dev, 0x8801, reg);
	reg_w_val(gspca_dev, 0x8802, mode | 0x01);
	do {
		reg_r(gspca_dev, 0x8803, 1);
		if (!gspca_dev->usb_buf[0]) {
			reg_r(gspca_dev, 0x8800, 1);
			value = gspca_dev->usb_buf[0];
			reg_r(gspca_dev, 0x8805, 1);
			return ((int) value << 8) | gspca_dev->usb_buf[0];
		}
		msleep(10);
	} while (--retry);
	return -1;
}

static void sensor_mapwrite(struct gspca_dev *gspca_dev,
			    const __u16 (*sensormap)[2])
{
	while ((*sensormap)[0]) {
		gspca_dev->usb_buf[0] = (*sensormap)[1];
		gspca_dev->usb_buf[1] = (*sensormap)[1] >> 8;
		reg_w_buf(gspca_dev, (*sensormap)[0], 2);
		sensormap++;
	}
}

static void write_sensor_72a(struct gspca_dev *gspca_dev,
			     const __u16 (*sensor)[2])
{
	while ((*sensor)[0]) {
		i2c_write(gspca_dev, (*sensor)[1], (*sensor)[0]);
		sensor++;
	}
}

static void init_161rev12A(struct gspca_dev *gspca_dev)
{
	write_vector(gspca_dev, spca561_161rev12A_data1);
	sensor_mapwrite(gspca_dev, Pb100_1map8300);
/*fixme: should be in sd_start*/
	write_vector(gspca_dev, spca561_161rev12A_data2);
	sensor_mapwrite(gspca_dev, Pb100_2map8300);
}

/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam;
	__u16 vendor, product;
	__u8 data1, data2;

	/* Read from global registers the USB product and vendor IDs, just to
	 * prove that we can communicate with the device. This works, which
	 * confirms that we are communicating properly and that the device
	 * is a 561.
	 */
	reg_r(gspca_dev, 0x8104, 1);
	data1 = gspca_dev->usb_buf[0];
	reg_r(gspca_dev, 0x8105, 1);
	data2 = gspca_dev->usb_buf[0];
	vendor = (data2 << 8) | data1;
	reg_r(gspca_dev, 0x8106, 1);
	data1 = gspca_dev->usb_buf[0];
	reg_r(gspca_dev, 0x8107, 1);
	data2 = gspca_dev->usb_buf[0];
	product = (data2 << 8) | data1;
	if (vendor != id->idVendor || product != id->idProduct) {
		gspca_dbg(gspca_dev, D_PROBE,
			  "Bad vendor / product from device\n");
		return -EINVAL;
	}

	cam = &gspca_dev->cam;
	cam->needs_full_bandwidth = 1;

	sd->chip_revision = id->driver_info;
	if (sd->chip_revision == Rev012A) {
		cam->cam_mode = sif_012a_mode;
		cam->nmodes = ARRAY_SIZE(sif_012a_mode);
	} else {
		cam->cam_mode = sif_072a_mode;
		cam->nmodes = ARRAY_SIZE(sif_072a_mode);
	}
	sd->expo12a = EXPO12A_DEF;
	return 0;
}

/* this function is called at probe and resume time */
static int sd_init_12a(struct gspca_dev *gspca_dev)
{
	gspca_dbg(gspca_dev, D_STREAM, "Chip revision: 012a\n");
	init_161rev12A(gspca_dev);
	return 0;
}
static int sd_init_72a(struct gspca_dev *gspca_dev)
{
	gspca_dbg(gspca_dev, D_STREAM, "Chip revision: 072a\n");
	write_vector(gspca_dev, rev72a_reset);
	msleep(200);
	write_vector(gspca_dev, rev72a_init_data1);
	write_sensor_72a(gspca_dev, rev72a_init_sensor1);
	write_vector(gspca_dev, rev72a_init_data2);
	write_sensor_72a(gspca_dev, rev72a_init_sensor2);
	reg_w_val(gspca_dev, 0x8112, 0x30);
	return 0;
}

static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	__u16 reg;

	if (sd->chip_revision == Rev012A)
		reg = 0x8610;
	else
		reg = 0x8611;

	reg_w_val(gspca_dev, reg + 0, val);	/* R */
	reg_w_val(gspca_dev, reg + 1, val);	/* Gr */
	reg_w_val(gspca_dev, reg + 2, val);	/* B */
	reg_w_val(gspca_dev, reg + 3, val);	/* Gb */
}

static void setwhite(struct gspca_dev *gspca_dev, s32 white, s32 contrast)
{
	struct sd *sd = (struct sd *) gspca_dev;
	__u8 blue, red;
	__u16 reg;

	/* try to emulate MS-win as possible */
	red = 0x20 + white * 3 / 8;
	blue = 0x90 - white * 5 / 8;
	if (sd->chip_revision == Rev012A) {
		reg = 0x8614;
	} else {
		reg = 0x8651;
		red += contrast - 0x20;
		blue += contrast - 0x20;
		reg_w_val(gspca_dev, 0x8652, contrast + 0x20);	/* Gr */
		reg_w_val(gspca_dev, 0x8654, contrast + 0x20);	/* Gb */
	}
	reg_w_val(gspca_dev, reg, red);
	reg_w_val(gspca_dev, reg + 2, blue);
}

/* rev 12a only */
static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
	int i, expo = 0;

	/* Register 0x8309 controls exposure for the spca561,
	   the basic exposure setting goes from 1-2047, where 1 is completely
	   dark and 2047 is very bright. It not only influences exposure but
	   also the framerate (to allow for longer exposure) from 1 - 300 it
	   only raises the exposure time then from 300 - 600 it halves the
	   framerate to be able to further raise the exposure time and for
	   every 300 more it halves the framerate again. This allows for a
	   maximum exposure time of circa 0.2 - 0.25 seconds
	   (30 / (2000/3000) fps). Sometimes this is not enough, the 1-2047
	   uses bits 0-10, bits 11-12 configure a divider for the base
	   framerate which is used at the exposure setting of 1-300.
These bits configure the base framerate according to the following formula: fps = 60 / (value + 2) */ /* We choose to use the high bits setting the fixed framerate divisor asap, as setting high basic exposure setting without the fixed divider in combination with high gains makes the cam stop */ static const int table[] = { 0, 450, 550, 625, EXPOSURE_MAX }; for (i = 0; i < ARRAY_SIZE(table) - 1; i++) { if (val <= table[i + 1]) { expo = val - table[i]; if (i) expo += 300; expo |= i << 11; break; } } gspca_dev->usb_buf[0] = expo; gspca_dev->usb_buf[1] = expo >> 8; reg_w_buf(gspca_dev, 0x8309, 2); } /* rev 12a only */ static void setgain(struct gspca_dev *gspca_dev, s32 val) { /* gain reg low 6 bits 0-63 gain, bit 6 and 7, both double the sensitivity when set, so 31 + one of them set == 63, and 15 with both of them set == 63 */ if (val < 64) gspca_dev->usb_buf[0] = val; else if (val < 128) gspca_dev->usb_buf[0] = (val / 2) | 0x40; else gspca_dev->usb_buf[0] = (val / 4) | 0xc0; gspca_dev->usb_buf[1] = 0; reg_w_buf(gspca_dev, 0x8335, 2); } static void setautogain(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (val) sd->ag_cnt = AG_CNT_START; else sd->ag_cnt = -1; } static int sd_start_12a(struct gspca_dev *gspca_dev) { int mode; static const __u8 Reg8391[8] = {0x92, 0x30, 0x20, 0x00, 0x0c, 0x00, 0x00, 0x00}; mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; if (mode <= 1) { /* Use compression on 320x240 and above */ reg_w_val(gspca_dev, 0x8500, 0x10 | mode); } else { /* I couldn't get the compression to work below 320x240 * Fortunately at these resolutions the bandwidth * is sufficient to push raw frames at ~20fps */ reg_w_val(gspca_dev, 0x8500, mode); } /* -- qq@kuku.eu.org */ gspca_dev->usb_buf[0] = 0xaa; gspca_dev->usb_buf[1] = 0x00; reg_w_buf(gspca_dev, 0x8307, 2); /* clock - lower 0x8X values lead to fps > 30 */ reg_w_val(gspca_dev, 0x8700, 0x8a); /* 0x8f 0x85 0x27 clock */ reg_w_val(gspca_dev, 0x8112, 0x1e | 0x20); reg_w_val(gspca_dev, 0x850b, 0x03); memcpy(gspca_dev->usb_buf, Reg8391, 8); reg_w_buf(gspca_dev, 0x8391, 8); reg_w_buf(gspca_dev, 0x8390, 8); /* Led ON (bit 3 -> 0 */ reg_w_val(gspca_dev, 0x8114, 0x00); return 0; } static int sd_start_72a(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int Clck; int mode; write_vector(gspca_dev, rev72a_reset); msleep(200); write_vector(gspca_dev, rev72a_init_data1); write_sensor_72a(gspca_dev, rev72a_init_sensor1); mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; switch (mode) { default: case 0: Clck = 0x27; /* ms-win 0x87 */ break; case 1: Clck = 0x25; break; case 2: Clck = 0x22; break; case 3: Clck = 0x21; break; } reg_w_val(gspca_dev, 0x8700, Clck); /* 0x27 clock */ reg_w_val(gspca_dev, 0x8702, 0x81); reg_w_val(gspca_dev, 0x8500, mode); /* mode */ write_sensor_72a(gspca_dev, rev72a_init_sensor2); setwhite(gspca_dev, v4l2_ctrl_g_ctrl(sd->hue), v4l2_ctrl_g_ctrl(sd->contrast)); /* setbrightness(gspca_dev); * fixme: bad values */ setautogain(gspca_dev, v4l2_ctrl_g_ctrl(sd->autogain)); reg_w_val(gspca_dev, 0x8112, 0x10 | 0x20); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->chip_revision == Rev012A) { reg_w_val(gspca_dev, 0x8112, 0x0e); /* Led Off (bit 3 -> 1 */ reg_w_val(gspca_dev, 0x8114, 0x08); } else { reg_w_val(gspca_dev, 0x8112, 0x20); /* reg_w_val(gspca_dev, 0x8102, 0x00); ?? 
		 */
	}
}

static void do_autogain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int expotimes;
	int pixelclk;
	int gainG;
	__u8 R, Gr, Gb, B;
	int y;
	__u8 luma_mean = 110;
	__u8 luma_delta = 20;
	__u8 spring = 4;

	if (sd->ag_cnt < 0)
		return;
	if (--sd->ag_cnt >= 0)
		return;
	sd->ag_cnt = AG_CNT_START;

	switch (sd->chip_revision) {
	case Rev072A:
		reg_r(gspca_dev, 0x8621, 1);
		Gr = gspca_dev->usb_buf[0];
		reg_r(gspca_dev, 0x8622, 1);
		R = gspca_dev->usb_buf[0];
		reg_r(gspca_dev, 0x8623, 1);
		B = gspca_dev->usb_buf[0];
		reg_r(gspca_dev, 0x8624, 1);
		Gb = gspca_dev->usb_buf[0];
		y = (77 * R + 75 * (Gr + Gb) + 29 * B) >> 8;
		/* u= (128*B-(43*(Gr+Gb+R))) >> 8; */
		/* v= (128*R-(53*(Gr+Gb))-21*B) >> 8; */

		if (y < luma_mean - luma_delta ||
		    y > luma_mean + luma_delta) {
			expotimes = i2c_read(gspca_dev, 0x09, 0x10);
			pixelclk = 0x0800;
			expotimes = expotimes & 0x07ff;
			gainG = i2c_read(gspca_dev, 0x35, 0x10);

			expotimes += (luma_mean - y) >> spring;
			gainG += (luma_mean - y) / 50;

			if (gainG > 0x3f)
				gainG = 0x3f;
			else if (gainG < 3)
				gainG = 3;
			i2c_write(gspca_dev, gainG, 0x35);

			if (expotimes > 0x0256)
				expotimes = 0x0256;
			else if (expotimes < 3)
				expotimes = 3;
			i2c_write(gspca_dev, expotimes | pixelclk, 0x09);
		}
		break;
	}
}

static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,		/* isoc packet */
			int len)		/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;

	len--;
	switch (*data++) {			/* sequence number */
	case 0:					/* start of frame */
		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);

		/* This should never happen */
		if (len < 2) {
			gspca_err(gspca_dev, "Short SOF packet, ignoring\n");
			gspca_dev->last_packet_type = DISCARD_PACKET;
			return;
		}

#if IS_ENABLED(CONFIG_INPUT)
		if (data[0] & 0x20) {
			input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
			input_sync(gspca_dev->input_dev);
			input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
			input_sync(gspca_dev->input_dev);
		}
#endif

		if (data[1] & 0x10) {
			/* compressed bayer */
			gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
		} else {
			/* raw bayer (with a header, which we skip) */
			if (sd->chip_revision == Rev012A) {
				data += 20;
				len -= 20;
			} else {
				data += 16;
				len -= 16;
			}
			gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
		}
		return;
	case 0xff:				/* drop (empty packets) */
		return;
	}
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
	struct sd *sd = (struct sd *)gspca_dev;

	gspca_dev->usb_err = 0;

	if (!gspca_dev->streaming)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		setbrightness(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_CONTRAST:
		/* hue/contrast control cluster for 72a */
		setwhite(gspca_dev, sd->hue->val, ctrl->val);
		break;
	case V4L2_CID_HUE:
		/* just plain hue control for 12a */
		setwhite(gspca_dev, ctrl->val, 0);
		break;
	case V4L2_CID_EXPOSURE:
		setexposure(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_GAIN:
		setgain(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_AUTOGAIN:
		setautogain(gspca_dev, ctrl->val);
		break;
	}
	return gspca_dev->usb_err;
}

static const struct v4l2_ctrl_ops sd_ctrl_ops = {
	.s_ctrl = sd_s_ctrl,
};

static int sd_init_controls_12a(struct gspca_dev *gspca_dev)
{
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 3);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			  V4L2_CID_HUE, 1, 0x7f, 1, 0x40);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_EXPOSURE, 1, EXPOSURE_MAX, 1, 700); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 255, 1, 63); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } static int sd_init_controls_72a(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 0x3f, 1, 0x20); sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HUE, 1, 0x7f, 1, 0x40); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 0x3f, 1, 0x20); sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->contrast); return 0; } /* sub-driver description */ static const struct sd_desc sd_desc_12a = { .name = MODULE_NAME, .init_controls = sd_init_controls_12a, .config = sd_config, .init = sd_init_12a, .start = sd_start_12a, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .other_input = 1, #endif }; static const struct sd_desc sd_desc_72a = { .name = MODULE_NAME, .init_controls = sd_init_controls_72a, .config = sd_config, .init = sd_init_72a, .start = sd_start_72a, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, #if IS_ENABLED(CONFIG_INPUT) .other_input = 1, #endif }; static const struct sd_desc *sd_desc[2] = { &sd_desc_12a, &sd_desc_72a }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A}, {USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A}, {USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A}, {USB_DEVICE(0x0461, 0x0815), .driver_info = Rev072A}, {USB_DEVICE(0x046d, 0x0928), .driver_info = Rev012A}, {USB_DEVICE(0x046d, 0x0929), .driver_info = Rev012A}, {USB_DEVICE(0x046d, 0x092a), .driver_info = Rev012A}, {USB_DEVICE(0x046d, 0x092b), .driver_info = Rev012A}, {USB_DEVICE(0x046d, 0x092c), .driver_info = Rev012A}, {USB_DEVICE(0x046d, 0x092d), .driver_info = Rev012A}, {USB_DEVICE(0x046d, 0x092e), .driver_info = Rev012A}, {USB_DEVICE(0x046d, 0x092f), .driver_info = Rev012A}, {USB_DEVICE(0x04fc, 0x0561), .driver_info = Rev072A}, {USB_DEVICE(0x060b, 0xa001), .driver_info = Rev072A}, {USB_DEVICE(0x10fd, 0x7e50), .driver_info = Rev072A}, {USB_DEVICE(0xabcd, 0xcdee), .driver_info = Rev072A}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, sd_desc[id->driver_info], sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
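/*
 * The exposure encoding in setexposure() above is easier to see in
 * isolation, so here is a userspace re-implementation of the same table
 * lookup and bit packing: bits 0-10 carry the exposure time, bits 11-12 the
 * framerate divider, and every band past the first is offset back into the
 * 300-600 range. encode_exposure() is a name invented for this sketch; the
 * table values are copied from the driver.
 */
#include <stdio.h>

#define EXPOSURE_MAX (2047 + 325)

static const int table[] = { 0, 450, 550, 625, EXPOSURE_MAX };

static unsigned int encode_exposure(int val)
{
	unsigned int expo = 0;
	int i;

	for (i = 0; i < (int)(sizeof(table) / sizeof(table[0])) - 1; i++) {
		if (val <= table[i + 1]) {
			expo = (unsigned int)(val - table[i]);	/* bits 0-10 */
			if (i)
				expo += 300;	/* keep exposure in 300-600 */
			expo |= (unsigned int)i << 11;	/* framerate divider */
			break;
		}
	}
	return expo;
}

int main(void)
{
	int samples[] = { 1, 300, 451, 700, EXPOSURE_MAX };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("val %4d -> reg 0x%04x (divider %u)\n", samples[i],
		       encode_exposure(samples[i]),
		       encode_exposure(samples[i]) >> 11);
	return 0;
}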
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLER__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

#include <linux/kmsan-checks.h>

/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
extern unsigned long direct_map_physmem_end;

static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

	return x;
}

#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
	((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif

#define __phys_reloc_hide(x)	(x)

void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);

static inline void clear_page(void *page)
{
	/*
	 * Clean up KMSAN metadata for the page being cleared. The assembly call
	 * below clobbers @page, so we perform unpoisoning before it.
	 */
	kmsan_unpoison_memory(page, PAGE_SIZE);
	alternative_call_2(clear_page_orig,
			   clear_page_rep, X86_FEATURE_REP_GOOD,
			   clear_page_erms, X86_FEATURE_ERMS,
			   "=D" (page),
			   "D" (page),
			   "cc", "memory", "rax", "rcx");
}

void copy_page(void *to, void *from);
KCFI_REFERENCE(copy_page);

/*
 * User space process size. This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen. This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
static __always_inline unsigned long task_size_max(void)
{
	unsigned long ret;

	alternative_io("movq %[small],%0", "movq %[large],%0",
		       X86_FEATURE_LA57,
		       "=r" (ret),
		       [small] "i" ((1ul << 47)-PAGE_SIZE),
		       [large] "i" ((1ul << 56)-PAGE_SIZE));

	return ret;
}

#endif	/* !__ASSEMBLER__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif /* _ASM_X86_PAGE_64_H */
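/*
 * The carry-flag trick in __phys_addr_nodebug() above can be checked in
 * plain C: subtracting __START_KERNEL_map wraps around for any direct-map
 * address, making y compare greater than x, which selects the PAGE_OFFSET
 * branch. The constants below mirror the default 4-level-paging layout, but
 * PHYS_BASE is a hypothetical load address (the real phys_base depends on
 * where the kernel was loaded), so this is a sketch, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

#define KERNEL_MAP	0xffffffff80000000ull	/* __START_KERNEL_map */
#define PAGE_OFF	0xffff888000000000ull	/* PAGE_OFFSET */
#define PHYS_BASE	0x1000000ull		/* hypothetical */

static uint64_t phys_addr(uint64_t x)
{
	uint64_t y = x - KERNEL_MAP;	/* wraps when x < KERNEL_MAP */

	/* the wrap (x <= y) tells the two virtual regions apart */
	return y + ((x > y) ? PHYS_BASE : (KERNEL_MAP - PAGE_OFF));
}

int main(void)
{
	uint64_t text = KERNEL_MAP + 0x2000;	/* kernel-text address */
	uint64_t dmap = PAGE_OFF + 0x5000;	/* direct-map address */

	printf("text -> %#llx (expect %#llx)\n",
	       (unsigned long long)phys_addr(text),
	       (unsigned long long)(PHYS_BASE + 0x2000));
	printf("dmap -> %#llx (expect 0x5000)\n",
	       (unsigned long long)phys_addr(dmap));
	return 0;
}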
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36bits on most modern x86)
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cc_platform.h>
#include <linux/string_choices.h>
#include <asm/processor-flags.h>
#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/memtype.h>

#include "mtrr.h"

struct fixed_range_block {
	int base_msr;		/* start address of an MTRR block */
	int ranges;		/* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 },	/* one   64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 },	/* two   16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 },	/* eight  4k MTRRs */
	{}
};

struct cache_map {
	u64 start;
	u64 end;
	u64 flags;
	u64 type:8;
	u64 fixed:1;
};

bool mtrr_debug;

static int __init mtrr_param_setup(char *str)
{
	int rc = 0;

	if (!str)
		return -EINVAL;
	if (!strcmp(str, "debug"))
		mtrr_debug = true;
	else
		rc = -EINVAL;

	return rc;
}
early_param("mtrr", mtrr_param_setup);

/*
 * CACHE_MAP_MAX is the maximum number of memory ranges in cache_map, where
 * no 2 adjacent ranges have the same cache mode (those would be merged).
 * The number is based on the worst case:
 * - no two adjacent fixed MTRRs share the same cache mode
 * - one variable MTRR is spanning a huge area with mode WB
 * - 255 variable MTRRs with mode UC all overlap with the WB MTRR, creating 2
 *   additional ranges each (result like "ababababa...aba" with a = WB, b = UC),
 *   accounting for MTRR_MAX_VAR_RANGES * 2 - 1 range entries
 * - a TOP_MEM2 area (even with overlapping an UC MTRR can't add 2 range entries
 *   to the possible maximum, as it always starts at 4GB, thus it can't be in
 *   the middle of that MTRR, unless that MTRR starts at 0, which would remove
 *   the initial "a" from the "abababa" pattern above)
 * The map won't contain ranges with no matching MTRR (those fall back to the
 * default cache mode).
 */
#define CACHE_MAP_MAX	(MTRR_NUM_FIXED_RANGES + MTRR_MAX_VAR_RANGES * 2)

static struct cache_map init_cache_map[CACHE_MAP_MAX] __initdata;
static struct cache_map *cache_map __refdata = init_cache_map;
static unsigned int cache_map_size = CACHE_MAP_MAX;
static unsigned int cache_map_n;
static unsigned int cache_map_fixed;

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);

/* Reserved bits in the high portion of the MTRRphysBaseN MSR.
*/ u32 phys_hi_rsvd; /* * BIOS is expected to clear MtrrFixDramModEn bit, see for example * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD * Opteron Processors" (26094 Rev. 3.30 February 2006), section * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set * to 1 during BIOS initialization of the fixed MTRRs, then cleared to * 0 for operation." */ static inline void k8_check_syscfg_dram_mod_en(void) { u32 lo, hi; if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0x0f))) return; if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) return; rdmsr(MSR_AMD64_SYSCFG, lo, hi); if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) { pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]" " not cleared by BIOS, clearing this bit\n", smp_processor_id()); lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY; mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi); } } /* Get the size of contiguous MTRR range */ static u64 get_mtrr_size(u64 mask) { u64 size; mask |= (u64)phys_hi_rsvd << 32; size = -mask; return size; } static u8 get_var_mtrr_state(unsigned int reg, u64 *start, u64 *size) { struct mtrr_var_range *mtrr = mtrr_state.var_ranges + reg; if (!(mtrr->mask_lo & MTRR_PHYSMASK_V)) return MTRR_TYPE_INVALID; *start = (((u64)mtrr->base_hi) << 32) + (mtrr->base_lo & PAGE_MASK); *size = get_mtrr_size((((u64)mtrr->mask_hi) << 32) + (mtrr->mask_lo & PAGE_MASK)); return mtrr->base_lo & MTRR_PHYSBASE_TYPE; } static u8 get_effective_type(u8 type1, u8 type2) { if (type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE) return MTRR_TYPE_UNCACHABLE; if ((type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH) || (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK)) return MTRR_TYPE_WRTHROUGH; if (type1 != type2) return MTRR_TYPE_UNCACHABLE; return type1; } static void rm_map_entry_at(int idx) { cache_map_n--; if (cache_map_n > idx) { memmove(cache_map + idx, cache_map + idx + 1, sizeof(*cache_map) * (cache_map_n - idx)); } } /* * Add an entry into cache_map at a specific index. Merges adjacent entries if * appropriate. Return the number of merges for correcting the scan index * (this is needed as merging will reduce the number of entries, which will * result in skipping entries in future iterations if the scan index isn't * corrected). * Note that the corrected index can never go below -1 (resulting in being 0 in * the next scan iteration), as "2" is returned only if the current index is * larger than zero. */ static int add_map_entry_at(u64 start, u64 end, u8 type, int idx) { bool merge_prev = false, merge_next = false; if (start >= end) return 0; if (idx > 0) { struct cache_map *prev = cache_map + idx - 1; if (!prev->fixed && start == prev->end && type == prev->type) merge_prev = true; } if (idx < cache_map_n) { struct cache_map *next = cache_map + idx; if (!next->fixed && end == next->start && type == next->type) merge_next = true; } if (merge_prev && merge_next) { cache_map[idx - 1].end = cache_map[idx].end; rm_map_entry_at(idx); return 2; } if (merge_prev) { cache_map[idx - 1].end = end; return 1; } if (merge_next) { cache_map[idx].start = start; return 1; } /* Sanity check: the array should NEVER be too small! 
*/ if (cache_map_n == cache_map_size) { WARN(1, "MTRR cache mode memory map exhausted!\n"); cache_map_n = cache_map_fixed; return 0; } if (cache_map_n > idx) { memmove(cache_map + idx + 1, cache_map + idx, sizeof(*cache_map) * (cache_map_n - idx)); } cache_map[idx].start = start; cache_map[idx].end = end; cache_map[idx].type = type; cache_map[idx].fixed = 0; cache_map_n++; return 0; } /* Clear a part of an entry. Return 1 if start of entry is still valid. */ static int clr_map_range_at(u64 start, u64 end, int idx) { int ret = start != cache_map[idx].start; u64 tmp; if (start == cache_map[idx].start && end == cache_map[idx].end) { rm_map_entry_at(idx); } else if (start == cache_map[idx].start) { cache_map[idx].start = end; } else if (end == cache_map[idx].end) { cache_map[idx].end = start; } else { tmp = cache_map[idx].end; cache_map[idx].end = start; add_map_entry_at(end, tmp, cache_map[idx].type, idx + 1); } return ret; } /* * Add MTRR to the map. The current map is scanned and each part of the MTRR * either overlapping with an existing entry or with a hole in the map is * handled separately. */ static void add_map_entry(u64 start, u64 end, u8 type) { u8 new_type, old_type; u64 tmp; int i; for (i = 0; i < cache_map_n && start < end; i++) { if (start >= cache_map[i].end) continue; if (start < cache_map[i].start) { /* Region start has no overlap. */ tmp = min(end, cache_map[i].start); i -= add_map_entry_at(start, tmp, type, i); start = tmp; continue; } new_type = get_effective_type(type, cache_map[i].type); old_type = cache_map[i].type; if (cache_map[i].fixed || new_type == old_type) { /* Cut off start of new entry. */ start = cache_map[i].end; continue; } /* Handle only overlapping part of region. */ tmp = min(end, cache_map[i].end); i += clr_map_range_at(start, tmp, i); i -= add_map_entry_at(start, tmp, new_type, i); start = tmp; } /* Add rest of region after last map entry (rest might be empty). */ add_map_entry_at(start, end, type, i); } /* Add variable MTRRs to cache map. */ static void map_add_var(void) { u64 start, size; unsigned int i; u8 type; /* * Add AMD TOP_MEM2 area. Can't be added in mtrr_build_map(), as it * needs to be added again when rebuilding the map due to potentially * having moved as a result of variable MTRRs for memory below 4GB. */ if (mtrr_tom2) { add_map_entry(BIT_ULL(32), mtrr_tom2, MTRR_TYPE_WRBACK); cache_map[cache_map_n - 1].fixed = 1; } for (i = 0; i < num_var_ranges; i++) { type = get_var_mtrr_state(i, &start, &size); if (type != MTRR_TYPE_INVALID) add_map_entry(start, start + size, type); } } /* * Rebuild map by replacing variable entries. Needs to be called when MTRR * registers are being changed after boot, as such changes could include * removals of registers, which are complicated to handle without rebuild of * the map. */ void generic_rebuild_map(void) { if (mtrr_if != &generic_mtrr_ops) return; cache_map_n = cache_map_fixed; map_add_var(); } static unsigned int __init get_cache_map_size(void) { return cache_map_fixed + 2 * num_var_ranges + (mtrr_tom2 != 0); } /* Build the cache_map containing the cache modes per memory range. */ void __init mtrr_build_map(void) { u64 start, end, size; unsigned int i; u8 type; /* Add fixed MTRRs, optimize for adjacent entries with same type. */ if (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED) { /* * Start with 64k size fixed entries, preset 1st one (hence the * loop below is starting with index 1). 
*/ start = 0; end = size = 0x10000; type = mtrr_state.fixed_ranges[0]; for (i = 1; i < MTRR_NUM_FIXED_RANGES; i++) { /* 8 64k entries, then 16 16k ones, rest 4k. */ if (i == 8 || i == 24) size >>= 2; if (mtrr_state.fixed_ranges[i] != type) { add_map_entry(start, end, type); start = end; type = mtrr_state.fixed_ranges[i]; } end += size; } add_map_entry(start, end, type); } /* Mark fixed, they take precedence. */ for (i = 0; i < cache_map_n; i++) cache_map[i].fixed = 1; cache_map_fixed = cache_map_n; map_add_var(); pr_info("MTRR map: %u entries (%u fixed + %u variable; max %u), built from %u variable MTRRs\n", cache_map_n, cache_map_fixed, cache_map_n - cache_map_fixed, get_cache_map_size(), num_var_ranges + (mtrr_tom2 != 0)); if (mtrr_debug) { for (i = 0; i < cache_map_n; i++) { pr_info("%3u: %016llx-%016llx %s\n", i, cache_map[i].start, cache_map[i].end - 1, mtrr_attrib_to_str(cache_map[i].type)); } } } /* Copy the cache_map from __initdata memory to dynamically allocated one. */ void __init mtrr_copy_map(void) { unsigned int new_size = get_cache_map_size(); if (!mtrr_state.enabled || !new_size) { cache_map = NULL; return; } mutex_lock(&mtrr_mutex); cache_map = kcalloc(new_size, sizeof(*cache_map), GFP_KERNEL); if (cache_map) { memmove(cache_map, init_cache_map, cache_map_n * sizeof(*cache_map)); cache_map_size = new_size; } else { mtrr_state.enabled = 0; pr_err("MTRRs disabled due to allocation failure for lookup map.\n"); } mutex_unlock(&mtrr_mutex); } /** * guest_force_mtrr_state - set static MTRR state for a guest * * Used to set MTRR state via different means (e.g. with data obtained from * a hypervisor). * Is allowed only for special cases when running virtualized. Must be called * from the x86_init.hyper.init_platform() hook. It can be called only once. * The MTRR state can't be changed afterwards. To ensure that, X86_FEATURE_MTRR * is cleared. * * @var: MTRR variable range array to use * @num_var: length of the @var array * @def_type: default caching type */ void guest_force_mtrr_state(struct mtrr_var_range *var, unsigned int num_var, mtrr_type def_type) { unsigned int i; /* Only allowed to be called once before mtrr_bp_init(). */ if (WARN_ON_ONCE(mtrr_state_set)) return; /* Only allowed when running virtualized. */ if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) return; /* * Only allowed for special virtualization cases: * - when running as Hyper-V, SEV-SNP guest using vTOM * - when running as Xen PV guest * - when running as SEV-SNP or TDX guest to avoid unnecessary * VMM communication/Virtualization exceptions (#VC, #VE) */ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !hv_is_isolation_supported() && !cpu_feature_enabled(X86_FEATURE_XENPV) && !cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) return; /* Disable MTRR in order to disable MTRR modifications. 
*/ setup_clear_cpu_cap(X86_FEATURE_MTRR); if (var) { if (num_var > MTRR_MAX_VAR_RANGES) { pr_warn("Trying to overwrite MTRR state with %u variable entries\n", num_var); num_var = MTRR_MAX_VAR_RANGES; } for (i = 0; i < num_var; i++) mtrr_state.var_ranges[i] = var[i]; num_var_ranges = num_var; } mtrr_state.def_type = def_type; mtrr_state.enabled |= MTRR_STATE_MTRR_ENABLED; mtrr_state_set = 1; } static u8 type_merge(u8 type, u8 new_type, u8 *uniform) { u8 effective_type; if (type == MTRR_TYPE_INVALID) return new_type; effective_type = get_effective_type(type, new_type); if (type != effective_type) *uniform = 0; return effective_type; } /** * mtrr_type_lookup - look up memory type in MTRR * * @start: Begin of the physical address range * @end: End of the physical address range * @uniform: output argument: * - 1: the returned MTRR type is valid for the whole region * - 0: otherwise * * Return Values: * MTRR_TYPE_(type) - The effective MTRR type for the region * MTRR_TYPE_INVALID - MTRR is disabled */ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform) { u8 type = MTRR_TYPE_INVALID; unsigned int i; if (!mtrr_state_set) { /* Uniformity is unknown. */ *uniform = 0; return MTRR_TYPE_UNCACHABLE; } *uniform = 1; if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED)) return MTRR_TYPE_UNCACHABLE; for (i = 0; i < cache_map_n && start < end; i++) { /* Region after current map entry? -> continue with next one. */ if (start >= cache_map[i].end) continue; /* Start of region not covered by current map entry? */ if (start < cache_map[i].start) { /* At least some part of region has default type. */ type = type_merge(type, mtrr_state.def_type, uniform); /* End of region not covered, too? -> lookup done. */ if (end <= cache_map[i].start) return type; } /* At least part of region covered by map entry. */ type = type_merge(type, cache_map[i].type, uniform); start = cache_map[i].end; } /* End of region past last entry in map? -> use default type. 
*/ if (start < end) type = type_merge(type, mtrr_state.def_type, uniform); return type; } /* Get the MSR pair relating to a var range */ static void get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) { rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); } /* Fill the MSR pair relating to a var range */ void fill_mtrr_var_range(unsigned int index, u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) { struct mtrr_var_range *vr; vr = mtrr_state.var_ranges; vr[index].base_lo = base_lo; vr[index].base_hi = base_hi; vr[index].mask_lo = mask_lo; vr[index].mask_hi = mask_hi; } static void get_fixed_ranges(mtrr_type *frs) { unsigned int *p = (unsigned int *)frs; int i; k8_check_syscfg_dram_mod_en(); rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]); for (i = 0; i < 2; i++) rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]); for (i = 0; i < 8; i++) rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]); } void mtrr_save_fixed_ranges(void *info) { if (mtrr_state.have_fixed) get_fixed_ranges(mtrr_state.fixed_ranges); } static unsigned __initdata last_fixed_start; static unsigned __initdata last_fixed_end; static mtrr_type __initdata last_fixed_type; static void __init print_fixed_last(void) { if (!last_fixed_end) return; pr_info(" %05X-%05X %s\n", last_fixed_start, last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); last_fixed_end = 0; } static void __init update_fixed_last(unsigned base, unsigned end, mtrr_type type) { last_fixed_start = base; last_fixed_end = end; last_fixed_type = type; } static void __init print_fixed(unsigned base, unsigned step, const mtrr_type *types) { unsigned i; for (i = 0; i < 8; ++i, ++types, base += step) { if (last_fixed_end == 0) { update_fixed_last(base, base + step, *types); continue; } if (last_fixed_end == base && last_fixed_type == *types) { last_fixed_end = base + step; continue; } /* new segments: gap or different type */ print_fixed_last(); update_fixed_last(base, base + step, *types); } } static void __init print_mtrr_state(void) { unsigned int i; int high_width; pr_info("MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type)); if (mtrr_state.have_fixed) { pr_info("MTRR fixed ranges %s:\n", str_enabled_disabled( (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) && (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED))); print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); for (i = 0; i < 2; ++i) print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); for (i = 0; i < 8; ++i) print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); /* tail */ print_fixed_last(); } pr_info("MTRR variable ranges %s:\n", str_enabled_disabled(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED)); high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4; for (i = 0; i < num_var_ranges; ++i) { if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V) pr_info(" %u base %0*X%05X000 mask %0*X%05X000 %s\n", i, high_width, mtrr_state.var_ranges[i].base_hi, mtrr_state.var_ranges[i].base_lo >> 12, high_width, mtrr_state.var_ranges[i].mask_hi, mtrr_state.var_ranges[i].mask_lo >> 12, mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & MTRR_PHYSBASE_TYPE)); else pr_info(" %u disabled\n", i); } if (mtrr_tom2) pr_info("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); } /* Grab all of the MTRR state for this CPU into *state */ bool __init get_mtrr_state(void) { struct mtrr_var_range *vrs; unsigned lo, dummy; unsigned int i; vrs = 
mtrr_state.var_ranges; rdmsr(MSR_MTRRcap, lo, dummy); mtrr_state.have_fixed = lo & MTRR_CAP_FIX; for (i = 0; i < num_var_ranges; i++) get_mtrr_var_range(i, &vrs[i]); if (mtrr_state.have_fixed) get_fixed_ranges(mtrr_state.fixed_ranges); rdmsr(MSR_MTRRdefType, lo, dummy); mtrr_state.def_type = lo & MTRR_DEF_TYPE_TYPE; mtrr_state.enabled = (lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT; if (amd_special_default_mtrr()) { unsigned low, high; /* TOP_MEM2 */ rdmsr(MSR_K8_TOP_MEM2, low, high); mtrr_tom2 = high; mtrr_tom2 <<= 32; mtrr_tom2 |= low; mtrr_tom2 &= 0xffffff800000ULL; } if (mtrr_debug) print_mtrr_state(); mtrr_state_set = 1; return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED); } /* Some BIOS's are messed up and don't set all MTRRs the same! */ void __init mtrr_state_warn(void) { unsigned long mask = smp_changes_mask; if (!mask) return; if (mask & MTRR_CHANGE_MASK_FIXED) pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); if (mask & MTRR_CHANGE_MASK_VARIABLE) pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n"); if (mask & MTRR_CHANGE_MASK_DEFTYPE) pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); pr_info("mtrr: probably your BIOS does not setup all CPUs.\n"); pr_info("mtrr: corrected configuration.\n"); } /* * Doesn't attempt to pass an error out to MTRR users * because it's quite complicated in some cases and probably not * worth it because the best error handling is to ignore it. */ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) { if (wrmsr_safe(msr, a, b) < 0) { pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", smp_processor_id(), msr, a, b); } } /** * set_fixed_range - checks & updates a fixed-range MTRR if it * differs from the value it should have * @msr: MSR address of the MTTR which should be checked and updated * @changed: pointer which indicates whether the MTRR needed to be changed * @msrwords: pointer to the MSR values which the MSR should have */ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) { unsigned lo, hi; rdmsr(msr, lo, hi); if (lo != msrwords[0] || hi != msrwords[1]) { mtrr_wrmsr(msr, msrwords[0], msrwords[1]); *changed = true; } } /** * generic_get_free_region - Get a free MTRR. * @base: The starting (base) address of the region. * @size: The size (in bytes) of the region. * @replace_reg: mtrr index to be replaced; set to invalid value if none. * * Returns: The index of the region on success, else negative on error. */ int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) { unsigned long lbase, lsize; mtrr_type ltype; int i, max; max = num_var_ranges; if (replace_reg >= 0 && replace_reg < max) return replace_reg; for (i = 0; i < max; ++i) { mtrr_if->get(i, &lbase, &lsize, &ltype); if (lsize == 0) return i; } return -ENOSPC; } static void generic_get_mtrr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type *type) { u32 mask_lo, mask_hi, base_lo, base_hi; unsigned int hi; u64 tmp, mask; /* * get_mtrr doesn't need to update mtrr_state, also it could be called * from any cpu, so try to print it out directly. */ get_cpu(); rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); if (!(mask_lo & MTRR_PHYSMASK_V)) { /* Invalid (i.e. 
free) range */ *base = 0; *size = 0; *type = 0; goto out_put_cpu; } rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); /* Work out the shifted address mask: */ tmp = (u64)mask_hi << 32 | (mask_lo & PAGE_MASK); mask = (u64)phys_hi_rsvd << 32 | tmp; /* Expand tmp with high bits to all 1s: */ hi = fls64(tmp); if (hi > 0) { tmp |= ~((1ULL<<(hi - 1)) - 1); if (tmp != mask) { pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); mask = tmp; } } /* * This works correctly if size is a power of two, i.e. a * contiguous range: */ *size = -mask >> PAGE_SHIFT; *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; *type = base_lo & MTRR_PHYSBASE_TYPE; out_put_cpu: put_cpu(); } /** * set_fixed_ranges - checks & updates the fixed-range MTRRs if they * differ from the saved set * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() */ static int set_fixed_ranges(mtrr_type *frs) { unsigned long long *saved = (unsigned long long *)frs; bool changed = false; int block = -1, range; k8_check_syscfg_dram_mod_en(); while (fixed_range_blocks[++block].ranges) { for (range = 0; range < fixed_range_blocks[block].ranges; range++) set_fixed_range(fixed_range_blocks[block].base_msr + range, &changed, (unsigned int *)saved++); } return changed; } /* * Set the MSR pair relating to a var range. * Returns true if changes are made. */ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) { unsigned int lo, hi; bool changed = false; rdmsr(MTRRphysBase_MSR(index), lo, hi); if ((vr->base_lo & ~MTRR_PHYSBASE_RSVD) != (lo & ~MTRR_PHYSBASE_RSVD) || (vr->base_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) { mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); changed = true; } rdmsr(MTRRphysMask_MSR(index), lo, hi); if ((vr->mask_lo & ~MTRR_PHYSMASK_RSVD) != (lo & ~MTRR_PHYSMASK_RSVD) || (vr->mask_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) { mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); changed = true; } return changed; } static u32 deftype_lo, deftype_hi; /** * set_mtrr_state - Set the MTRR state for this CPU. * * NOTE: The CPU must already be in a safe state for MTRR changes, including * measures that only a single CPU can be active in set_mtrr_state() in * order to not be subject to races for usage of deftype_lo. This is * accomplished by taking cache_disable_lock. * RETURNS: 0 if no changes made, else a mask indicating what was changed. 
*/ static unsigned long set_mtrr_state(void) { unsigned long change_mask = 0; unsigned int i; for (i = 0; i < num_var_ranges; i++) { if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) change_mask |= MTRR_CHANGE_MASK_VARIABLE; } if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) change_mask |= MTRR_CHANGE_MASK_FIXED; /* * Set_mtrr_restore restores the old value of MTRRdefType, * so to set it we fiddle with the saved value: */ if ((deftype_lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type || ((deftype_lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) { deftype_lo = (deftype_lo & MTRR_DEF_TYPE_DISABLE) | mtrr_state.def_type | (mtrr_state.enabled << MTRR_STATE_SHIFT); change_mask |= MTRR_CHANGE_MASK_DEFTYPE; } return change_mask; } void mtrr_disable(void) { /* Save MTRR state */ rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); /* Disable MTRRs, and set the default type to uncached */ mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi); } void mtrr_enable(void) { /* Intel (P6) standard MTRRs */ mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); } void mtrr_generic_set_state(void) { unsigned long mask, count; /* Actually set the state */ mask = set_mtrr_state(); /* Use the atomic bitops to update the global mask */ for (count = 0; count < sizeof(mask) * 8; ++count) { if (mask & 0x01) set_bit(count, &smp_changes_mask); mask >>= 1; } } /** * generic_set_mtrr - set variable MTRR register on the local CPU. * * @reg: The register to set. * @base: The base address of the region. * @size: The size of the region. If this is 0 the region is disabled. * @type: The type of the region. * * Returns nothing. */ static void generic_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { unsigned long flags; struct mtrr_var_range *vr; vr = &mtrr_state.var_ranges[reg]; local_irq_save(flags); cache_disable(); if (size == 0) { /* * The invalid bit is kept in the mask, so we simply * clear the relevant mask register to disable a range. 
*/ mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); memset(vr, 0, sizeof(struct mtrr_var_range)); } else { vr->base_lo = base << PAGE_SHIFT | type; vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd; vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V; vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd; mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi); mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi); } cache_enable(); local_irq_restore(flags); } int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) { unsigned long lbase, last; /* * For Intel PPro stepping <= 7 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF */ if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO && boot_cpu_data.x86_stepping <= 7) { if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); return -EINVAL; } if (!(base + size < 0x70000 || base > 0x7003F) && (type == MTRR_TYPE_WRCOMB || type == MTRR_TYPE_WRBACK)) { pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); return -EINVAL; } } /* * Check upper bits of base and last are equal and lower bits are 0 * for base and 1 for last */ last = base + size - 1; for (lbase = base; !(lbase & 1) && (last & 1); lbase = lbase >> 1, last = last >> 1) ; if (lbase != last) { pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); return -EINVAL; } return 0; } static int generic_have_wrcomb(void) { unsigned long config, dummy; rdmsr(MSR_MTRRcap, config, dummy); return config & MTRR_CAP_WC; } int positive_have_wrcomb(void) { return 1; } /* * Generic structure... */ const struct mtrr_ops generic_mtrr_ops = { .get = generic_get_mtrr, .get_free_region = generic_get_free_region, .set = generic_set_mtrr, .validate_add_page = generic_validate_add_page, .have_wrcomb = generic_have_wrcomb, };
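/*
 * Illustrative sketch (standalone userspace C, not part of the driver above):
 * the variable-MTRR mask arithmetic that generic_get_mtrr() relies on. For a
 * power-of-two region the physical mask is the complement of (size - 1), so
 * once the bits above the CPU's address width are extended to all 1s, the
 * size falls out as -mask. The 36-bit width, the 256 MiB size and all names
 * are hypothetical; __builtin_clzll (GCC/Clang) stands in for fls64().
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 256ULL << 20;                   /* assumed 256 MiB region */
	uint64_t mask = ~(size - 1);                    /* ideal mask, high bits all 1s */
	uint64_t truncated = mask & ((1ULL << 36) - 1); /* BIOS wrote only 36 bits */
	int hi = 64 - __builtin_clzll(truncated);       /* top set bit, like fls64(tmp) */
	uint64_t expanded = truncated | ~((1ULL << (hi - 1)) - 1);

	/* -mask recovers the size for a contiguous power-of-two range */
	printf("recovered size: %llu MiB\n", (unsigned long long)(-expanded >> 20));
	return 0;
}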
/* SPDX-License-Identifier: GPL-2.0 */ /* * This file provides wrappers with sanitizer instrumentation for bit * locking operations. * * To use this functionality, an arch's bitops.h file needs to define each of * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), * arch___set_bit(), etc.). */ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H #include <linux/instrumented.h> /** * clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to clear * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) { kcsan_release(); instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit_unlock(nr, addr); } /** * __clear_bit_unlock - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * * This is a non-atomic operation but implies a release barrier before the * memory operation. It can be used for an unlock if no other CPUs can * concurrently modify other bits in the word. */ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) { kcsan_release(); instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit_unlock(nr, addr); } /** * test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and provides acquire barrier semantics if * the returned value is 0. * It can be used to implement bit locks. */ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) { instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_set_bit_lock(nr, addr); } /** * xor_unlock_is_negative_byte - XOR a single byte in memory and test if * it is negative, for unlock. * @mask: Change the bits which are set in this mask. * @addr: The address of the word containing the byte to change. * * Changes some of bits 0-6 in the word pointed to by @addr. * This operation is atomic and provides release barrier semantics. * Used to optimise some folio operations which are commonly paired * with an unlock or end of writeback. Bit 7 is used as PG_waiters to * indicate whether anybody is waiting for the unlock. * * Return: Whether the top bit of the byte is set. */ static inline bool xor_unlock_is_negative_byte(unsigned long mask, volatile unsigned long *addr) { kcsan_release(); instrument_atomic_write(addr, sizeof(long)); return arch_xor_unlock_is_negative_byte(mask, addr); } #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
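/*
 * Illustrative sketch (standalone userspace C11, not the kernel API): the
 * bit-lock pattern these wrappers instrument, with acquire semantics on the
 * test-and-set and release semantics on the clearing unlock. Names and the
 * choice of bit 0 are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong lock_word;

static bool bit_trylock(int nr)
{
	unsigned long bit = 1UL << nr;

	/* acquire on success, mirroring test_and_set_bit_lock() */
	return !(atomic_fetch_or_explicit(&lock_word, bit,
					  memory_order_acquire) & bit);
}

static void bit_unlock(int nr)
{
	/* release, mirroring clear_bit_unlock() */
	atomic_fetch_and_explicit(&lock_word, ~(1UL << nr),
				  memory_order_release);
}

int main(void)
{
	while (!bit_trylock(0))
		;	/* spin until the lock bit was previously clear */
	puts("in critical section");
	bit_unlock(0);
	return 0;
}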
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIME64_H #define _LINUX_TIME64_H #include <linux/math64.h> #include <vdso/time64.h> typedef __s64 time64_t; typedef __u64 timeu64_t; #include <uapi/linux/time.h> struct timespec64 { time64_t tv_sec; /* seconds */ long tv_nsec; /* nanoseconds */ }; struct itimerspec64 { struct timespec64 it_interval; struct timespec64 it_value; }; /* Parameters used to convert the timespec values: */ #define PSEC_PER_NSEC 1000L /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) #define TIME64_MIN (-TIME64_MAX - 1) #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_MIN (-KTIME_MAX - 1) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) #define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC) /* * Limits for settimeofday(): * * To prevent setting the time close to the wraparound point, time setting * is limited so that a reasonable uptime can be accommodated. An uptime of * 30 years should be sufficient, which puts the cutoff in the year 2232. At * that point the cutoff is just a small part of the larger problem. */ #define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 * 3600) #define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX) static inline int timespec64_equal(const struct timespec64 *a, const struct timespec64 *b) { return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); } static inline bool timespec64_is_epoch(const struct timespec64 *ts) { return ts->tv_sec == 0 && ts->tv_nsec == 0; } /* * lhs < rhs: return <0 * lhs == rhs: return 0 * lhs > rhs: return >0 */ static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; if (lhs->tv_sec > rhs->tv_sec) return 1; return lhs->tv_nsec - rhs->tv_nsec; } extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); static inline struct timespec64 timespec64_add(struct timespec64 lhs, struct timespec64 rhs) { struct timespec64 ts_delta; set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec, lhs.tv_nsec + rhs.tv_nsec); return ts_delta; } /* * sub = lhs - rhs, in normalized form */ static inline struct timespec64 timespec64_sub(struct timespec64 lhs, struct timespec64 rhs) { struct timespec64 ts_delta; set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec, lhs.tv_nsec - rhs.tv_nsec); return ts_delta; } /* * Returns true if the timespec64 is normalized, false if denormalized: */ static inline bool timespec64_valid(const struct timespec64 *ts) { /* Dates before 1970 are bogus */ if (ts->tv_sec < 0) return false; /* Can't have more nanoseconds than a second */ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) return false; return true; } static inline bool timespec64_valid_strict(const struct timespec64 *ts) { if (!timespec64_valid(ts)) return false; /* Disallow values that could overflow ktime_t */ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
return false; return true; } static inline bool timespec64_valid_settod(const struct timespec64 *ts) { if (!timespec64_valid(ts)) return false; /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */ if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX) return false; return true; } /** * timespec64_to_ns - Convert timespec64 to nanoseconds * @ts: pointer to the timespec64 variable to be converted * * Returns the scalar nanosecond representation of the timespec64 * parameter. */ static inline s64 timespec64_to_ns(const struct timespec64 *ts) { /* Prevent multiplication overflow / underflow */ if (ts->tv_sec >= KTIME_SEC_MAX) return KTIME_MAX; if (ts->tv_sec <= KTIME_SEC_MIN) return KTIME_MIN; return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } /** * ns_to_timespec64 - Convert nanoseconds to timespec64 * @nsec: the nanoseconds value to be converted * * Returns the timespec64 representation of the nsec parameter. */ extern struct timespec64 ns_to_timespec64(s64 nsec); /** * timespec64_add_ns - Adds nanoseconds to a timespec64 * @a: pointer to timespec64 to be incremented * @ns: unsigned nanoseconds value to be added * * This must always be inlined because it's used from the x86-64 vdso, * which cannot call other kernel functions. */ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) { a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); a->tv_nsec = ns; } /* * timespec64_add_safe assumes both values are positive and checks for * overflow. It will return TIME64_MAX in case of overflow. */ extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, const struct timespec64 rhs); #endif /* _LINUX_TIME64_H */
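/*
 * Illustrative sketch (standalone userspace C, using struct timespec as a
 * stand-in for timespec64): the clamping done by timespec64_to_ns() and the
 * second-carry done by timespec64_add_ns(). The EX_* constants mirror the
 * definitions above; the plain / and % replace __iter_div_u64_rem().
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define EX_NSEC_PER_SEC	1000000000LL
#define EX_KTIME_MAX	INT64_MAX
#define EX_KTIME_SEC_MAX (EX_KTIME_MAX / EX_NSEC_PER_SEC)

static int64_t ex_timespec_to_ns(const struct timespec *ts)
{
	/* clamp instead of letting the multiplication overflow */
	if (ts->tv_sec >= EX_KTIME_SEC_MAX)
		return EX_KTIME_MAX;
	return (int64_t)ts->tv_sec * EX_NSEC_PER_SEC + ts->tv_nsec;
}

static void ex_timespec_add_ns(struct timespec *a, uint64_t ns)
{
	ns += a->tv_nsec;
	a->tv_sec += ns / EX_NSEC_PER_SEC;	/* carry whole seconds */
	a->tv_nsec = ns % EX_NSEC_PER_SEC;
}

int main(void)
{
	struct timespec t = { .tv_sec = 1, .tv_nsec = 999999999 };

	ex_timespec_add_ns(&t, 2);	/* wraps into the next second */
	printf("%lld.%09ld = %lld ns\n", (long long)t.tv_sec, t.tv_nsec,
	       (long long)ex_timespec_to_ns(&t));
	return 0;
}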
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM task #if !defined(_TRACE_TASK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_TASK_H #include <linux/tracepoint.h> TRACE_EVENT(task_newtask, TP_PROTO(struct task_struct *task, u64 clone_flags), TP_ARGS(task, clone_flags), TP_STRUCT__entry( __field( pid_t, pid) __array( char, comm, TASK_COMM_LEN) __field( u64, clone_flags) __field( short, oom_score_adj) ), TP_fast_assign( __entry->pid = task->pid; memcpy(__entry->comm, task->comm, TASK_COMM_LEN); __entry->clone_flags = clone_flags; __entry->oom_score_adj = task->signal->oom_score_adj; ), TP_printk("pid=%d comm=%s clone_flags=%llx oom_score_adj=%hd", __entry->pid, __entry->comm, __entry->clone_flags, __entry->oom_score_adj) ); TRACE_EVENT(task_rename, TP_PROTO(struct task_struct *task, const char *comm), TP_ARGS(task, comm), TP_STRUCT__entry( __array( char, oldcomm, TASK_COMM_LEN) __array( char, newcomm, TASK_COMM_LEN) __field( short, oom_score_adj) ), TP_fast_assign( memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN); strscpy(entry->newcomm, comm, TASK_COMM_LEN); __entry->oom_score_adj = task->signal->oom_score_adj; ), TP_printk("oldcomm=%s newcomm=%s oom_score_adj=%hd", __entry->oldcomm, __entry->newcomm, __entry->oom_score_adj) ); /** * task_prctl_unknown - called on unknown prctl() option * @option: option passed * @arg2: arg2 passed * @arg3: arg3 passed * @arg4: arg4 passed * @arg5: arg5 passed * * Called on an unknown prctl() option. */ TRACE_EVENT(task_prctl_unknown, TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5), TP_ARGS(option, arg2, arg3, arg4, arg5), TP_STRUCT__entry( __field( int, option) __field( unsigned long, arg2) __field( unsigned long, arg3) __field( unsigned long, arg4) __field( unsigned long, arg5) ), TP_fast_assign( __entry->option = option; __entry->arg2 = arg2; __entry->arg3 = arg3; __entry->arg4 = arg4; __entry->arg5 = arg5; ), TP_printk("option=%d arg2=%ld arg3=%ld arg4=%ld arg5=%ld", __entry->option, __entry->arg2, __entry->arg3, __entry->arg4, __entry->arg5) ); #endif /* This part must be outside protection */ #include <trace/define_trace.h>
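/*
 * Illustrative sketch (standalone userspace C): consuming the task_newtask
 * event defined above through tracefs. Assumes tracefs is mounted at
 * /sys/kernel/tracing and the caller is privileged; the events/task/...
 * path follows from TRACE_SYSTEM "task" and the event name.
 */
#include <stdio.h>

int main(void)
{
	FILE *en = fopen("/sys/kernel/tracing/events/task/task_newtask/enable", "w");
	FILE *pipe;
	char line[512];

	if (!en)
		return 1;
	fputs("1", en);	/* turn the event on */
	fclose(en);

	pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!pipe)
		return 1;
	/* every fork now yields a "pid=... comm=... clone_flags=..." line,
	 * formatted by the TP_printk() template above */
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}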
// SPDX-License-Identifier: GPL-2.0 /* * Copyright IBM Corporation, 2021 * * Author: Mike Rapoport <rppt@linux.ibm.com> */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/swap.h> #include <linux/mount.h> #include <linux/memfd.h> #include <linux/bitops.h> #include <linux/printk.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/pseudo_fs.h> #include <linux/secretmem.h> #include <linux/set_memory.h> #include <linux/sched/signal.h> #include <uapi/linux/magic.h> #include <asm/tlbflush.h> #include "internal.h" #undef pr_fmt #define pr_fmt(fmt) "secretmem: " fmt /* * Define mode and flag masks to allow validation of the system call * parameters.
*/ #define SECRETMEM_MODE_MASK (0x0) #define SECRETMEM_FLAGS_MASK SECRETMEM_MODE_MASK static bool secretmem_enable __ro_after_init = 1; module_param_named(enable, secretmem_enable, bool, 0400); MODULE_PARM_DESC(secretmem_enable, "Enable secretmem and memfd_secret(2) system call"); static atomic_t secretmem_users; bool secretmem_active(void) { return !!atomic_read(&secretmem_users); } static vm_fault_t secretmem_fault(struct vm_fault *vmf) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; struct inode *inode = file_inode(vmf->vma->vm_file); pgoff_t offset = vmf->pgoff; gfp_t gfp = vmf->gfp_mask; unsigned long addr; struct folio *folio; vm_fault_t ret; int err; if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode)) return vmf_error(-EINVAL); filemap_invalidate_lock_shared(mapping); retry: folio = filemap_lock_folio(mapping, offset); if (IS_ERR(folio)) { folio = folio_alloc(gfp | __GFP_ZERO, 0); if (!folio) { ret = VM_FAULT_OOM; goto out; } err = set_direct_map_invalid_noflush(folio_page(folio, 0)); if (err) { folio_put(folio); ret = vmf_error(err); goto out; } __folio_mark_uptodate(folio); err = filemap_add_folio(mapping, folio, offset, gfp); if (unlikely(err)) { folio_put(folio); /* * If a split of large page was required, it * already happened when we marked the page invalid * which guarantees that this call won't fail */ set_direct_map_default_noflush(folio_page(folio, 0)); if (err == -EEXIST) goto retry; ret = vmf_error(err); goto out; } addr = (unsigned long)folio_address(folio); flush_tlb_kernel_range(addr, addr + PAGE_SIZE); } vmf->page = folio_file_page(folio, vmf->pgoff); ret = VM_FAULT_LOCKED; out: filemap_invalidate_unlock_shared(mapping); return ret; } static const struct vm_operations_struct secretmem_vm_ops = { .fault = secretmem_fault, }; static int secretmem_release(struct inode *inode, struct file *file) { atomic_dec(&secretmem_users); return 0; } static int secretmem_mmap_prepare(struct vm_area_desc *desc) { const unsigned long len = desc->end - desc->start; if ((desc->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; if (!mlock_future_ok(desc->mm, desc->vm_flags | VM_LOCKED, len)) return -EAGAIN; desc->vm_flags |= VM_LOCKED | VM_DONTDUMP; desc->vm_ops = &secretmem_vm_ops; return 0; } bool vma_is_secretmem(struct vm_area_struct *vma) { return vma->vm_ops == &secretmem_vm_ops; } static const struct file_operations secretmem_fops = { .release = secretmem_release, .mmap_prepare = secretmem_mmap_prepare, }; static int secretmem_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { return -EBUSY; } static void secretmem_free_folio(struct folio *folio) { set_direct_map_default_noflush(folio_page(folio, 0)); folio_zero_segment(folio, 0, folio_size(folio)); } const struct address_space_operations secretmem_aops = { .dirty_folio = noop_dirty_folio, .free_folio = secretmem_free_folio, .migrate_folio = secretmem_migrate_folio, }; static int secretmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { struct inode *inode = d_inode(dentry); struct address_space *mapping = inode->i_mapping; unsigned int ia_valid = iattr->ia_valid; int ret; filemap_invalidate_lock(mapping); if ((ia_valid & ATTR_SIZE) && inode->i_size) ret = -EINVAL; else ret = simple_setattr(idmap, dentry, iattr); filemap_invalidate_unlock(mapping); return ret; } static const struct inode_operations secretmem_iops = { .setattr = secretmem_setattr, }; static struct vfsmount *secretmem_mnt; static struct file 
*secretmem_file_create(unsigned long flags) { struct file *file; struct inode *inode; const char *anon_name = "[secretmem]"; inode = anon_inode_make_secure_inode(secretmem_mnt->mnt_sb, anon_name, NULL); if (IS_ERR(inode)) return ERR_CAST(inode); file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem", O_RDWR | O_LARGEFILE, &secretmem_fops); if (IS_ERR(file)) goto err_free_inode; mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); mapping_set_unevictable(inode->i_mapping); inode->i_op = &secretmem_iops; inode->i_mapping->a_ops = &secretmem_aops; /* pretend we are a normal file with zero size */ inode->i_mode |= S_IFREG; inode->i_size = 0; atomic_inc(&secretmem_users); return file; err_free_inode: iput(inode); return file; } SYSCALL_DEFINE1(memfd_secret, unsigned int, flags) { struct file *file; int fd, err; /* make sure local flags do not conflict with global fcntl.h */ BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC); if (!secretmem_enable || !can_set_direct_map()) return -ENOSYS; if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC)) return -EINVAL; if (atomic_read(&secretmem_users) < 0) return -ENFILE; fd = get_unused_fd_flags(flags & O_CLOEXEC); if (fd < 0) return fd; file = secretmem_file_create(flags); if (IS_ERR(file)) { err = PTR_ERR(file); goto err_put_fd; } fd_install(fd, file); return fd; err_put_fd: put_unused_fd(fd); return err; } static int secretmem_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx; ctx = init_pseudo(fc, SECRETMEM_MAGIC); if (!ctx) return -ENOMEM; fc->s_iflags |= SB_I_NOEXEC; fc->s_iflags |= SB_I_NODEV; return 0; } static struct file_system_type secretmem_fs = { .name = "secretmem", .init_fs_context = secretmem_init_fs_context, .kill_sb = kill_anon_super, }; static int __init secretmem_init(void) { if (!secretmem_enable || !can_set_direct_map()) return 0; secretmem_mnt = kern_mount(&secretmem_fs); if (IS_ERR(secretmem_mnt)) return PTR_ERR(secretmem_mnt); return 0; } fs_initcall(secretmem_init);
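/*
 * Illustrative sketch (standalone userspace C): the memfd_secret(2) flow the
 * file above implements: create the fd, size it, then fault pages in through
 * a shared mapping (secretmem_mmap_prepare() rejects private ones). Assumes
 * a kernel built with CONFIG_SECRETMEM, the secretmem.enable parameter left
 * on, and headers new enough to define SYS_memfd_secret.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

int main(void)
{
	long fd = syscall(SYS_memfd_secret, 0);	/* no wrapper in older glibc */
	size_t len = 4096;
	char *p;

	if (fd < 0) {
		perror("memfd_secret");	/* ENOSYS when disabled */
		return 1;
	}
	if (ftruncate(fd, len) < 0)
		return 1;
	/* MAP_SHARED is mandatory; the page leaves the kernel direct map on
	 * first fault, as secretmem_fault() above shows */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "not visible in the direct map");
	munmap(p, len);
	close(fd);
	return 0;
}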
// SPDX-License-Identifier: GPL-2.0 /* * USB Empeg empeg-car player driver * * Copyright (C) 2000, 2001 * Gary Brubaker (xavyer@ix.netcom.com) * * Copyright (C) 1999 - 2001 * Greg Kroah-Hartman (greg@kroah.com) * * See Documentation/usb/usb-serial.rst for more information on using this * driver */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Gary Brubaker <xavyer@ix.netcom.com>" #define DRIVER_DESC "USB Empeg Mark I/II Driver" #define EMPEG_VENDOR_ID 0x084f #define EMPEG_PRODUCT_ID 0x0001 /* function prototypes for an empeg-car player */ static int empeg_startup(struct usb_serial *serial); static void empeg_init_termios(struct tty_struct *tty); static const struct usb_device_id id_table[] = { { USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver empeg_device = { .driver = { .name = "empeg", }, .id_table = id_table, .num_ports = 1, .bulk_out_size = 256, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .attach = empeg_startup, .init_termios = empeg_init_termios, }; static struct usb_serial_driver * const serial_drivers[] = { &empeg_device, NULL }; static int empeg_startup(struct usb_serial *serial) { int r; if (serial->dev->actconfig->desc.bConfigurationValue != 1) { dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); return -ENODEV; } r = usb_reset_configuration(serial->dev); /* continue on with initialization */ return r; } static void empeg_init_termios(struct tty_struct *tty) { struct ktermios *termios = &tty->termios; /* * The empeg-car player wants these particular tty settings. * You could, for example, change the baud rate; however, the * player currently only supports 115200, so there is really * no point in supporting changes to the tty settings.
* (at least for now) * * The default requirements for this device are: */ termios->c_iflag &= ~(IGNBRK /* disable ignore break */ | BRKINT /* disable break causes interrupt */ | PARMRK /* disable mark parity errors */ | ISTRIP /* disable clear high bit of input characters */ | INLCR /* disable translate NL to CR */ | IGNCR /* disable ignore CR */ | ICRNL /* disable translate CR to NL */ | IXON); /* disable enable XON/XOFF flow control */ termios->c_oflag &= ~OPOST; /* disable postprocess output characters */ termios->c_lflag &= ~(ECHO /* disable echo input characters */ | ECHONL /* disable echo new line */ | ICANON /* disable erase, kill, werase, and rprnt special characters */ | ISIG /* disable interrupt, quit, and suspend special characters */ | IEXTEN); /* disable non-POSIX special characters */ termios->c_cflag &= ~(CSIZE /* no size */ | PARENB /* disable parity bit */ | CBAUD); /* clear current baud rate */ termios->c_cflag |= CS8; /* character size 8 bits */ tty_encode_baud_rate(tty, 115200, 115200); } module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL v2");
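/*
 * Illustrative sketch (standalone userspace C): a rough termios(3) analogue
 * of the raw 115200 8N1 line that empeg_init_termios() pins above. The
 * device path is hypothetical; cfmakeraw() is the glibc/BSD shortcut for
 * the iflag/oflag/lflag bits the driver masks off.
 */
#define _DEFAULT_SOURCE
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0 || tcgetattr(fd, &tio) < 0)
		return 1;
	cfmakeraw(&tio);			/* raw input/output, no echo */
	tio.c_cflag &= ~(CSIZE | PARENB);
	tio.c_cflag |= CS8;			/* 8 data bits, no parity */
	cfsetispeed(&tio, B115200);
	cfsetospeed(&tio, B115200);
	if (tcsetattr(fd, TCSANOW, &tio) < 0)
		return 1;
	close(fd);
	return 0;
}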
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/sch_generic.c Generic packet scheduler routines. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * Jamal Hadi Salim, <hadi@cyberus.ca> 990601 * - Ingress support */ #include <linux/bitops.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/if_vlan.h> #include <linux/skb_array.h> #include <linux/if_macvlan.h> #include <linux/bpf.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> #include <net/dst.h> #include <net/hotdata.h> #include <trace/events/qdisc.h> #include <trace/events/net.h> #include <net/xfrm.h> /* Qdisc to use by default */ const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; EXPORT_SYMBOL(default_qdisc_ops); static void qdisc_maybe_clear_missed(struct Qdisc *q, const struct netdev_queue *txq) { clear_bit(__QDISC_STATE_MISSED, &q->state); /* Make sure the below netif_xmit_frozen_or_stopped() * checking happens after clearing STATE_MISSED. */ smp_mb__after_atomic(); /* Checking netif_xmit_frozen_or_stopped() again to * make sure STATE_MISSED is set if the STATE_MISSED * set by netif_tx_wake_queue()'s rescheduling of * net_tx_action() is cleared by the above clear_bit(). */ if (!netif_xmit_frozen_or_stopped(txq)) set_bit(__QDISC_STATE_MISSED, &q->state); else set_bit(__QDISC_STATE_DRAINING, &q->state); } /* Main transmission queue. */ /* Modifications to data participating in scheduling must be protected with * qdisc_lock(qdisc) spinlock. * * The idea is the following: * - enqueue, dequeue are serialized via qdisc root lock * - ingress filtering is also serialized via qdisc root lock * - updates to tree and tree walking are only done under the rtnl mutex.
*/ #define SKB_XOFF_MAGIC ((struct sk_buff *)1UL) static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q) { const struct netdev_queue *txq = q->dev_queue; spinlock_t *lock = NULL; struct sk_buff *skb; if (q->flags & TCQ_F_NOLOCK) { lock = qdisc_lock(q); spin_lock(lock); } skb = skb_peek(&q->skb_bad_txq); if (skb) { /* check the reason of requeuing without tx lock first */ txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { skb = __skb_dequeue(&q->skb_bad_txq); if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_dec(q, skb); qdisc_qstats_cpu_qlen_dec(q); } else { qdisc_qstats_backlog_dec(q, skb); q->q.qlen--; } } else { skb = SKB_XOFF_MAGIC; qdisc_maybe_clear_missed(q, txq); } } if (lock) spin_unlock(lock); return skb; } static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q) { struct sk_buff *skb = skb_peek(&q->skb_bad_txq); if (unlikely(skb)) skb = __skb_dequeue_bad_txq(q); return skb; } static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q, struct sk_buff *skb) { spinlock_t *lock = NULL; if (q->flags & TCQ_F_NOLOCK) { lock = qdisc_lock(q); spin_lock(lock); } __skb_queue_tail(&q->skb_bad_txq, skb); if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_inc(q, skb); qdisc_qstats_cpu_qlen_inc(q); } else { qdisc_qstats_backlog_inc(q, skb); q->q.qlen++; } if (lock) spin_unlock(lock); } static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) { spinlock_t *lock = NULL; if (q->flags & TCQ_F_NOLOCK) { lock = qdisc_lock(q); spin_lock(lock); } while (skb) { struct sk_buff *next = skb->next; __skb_queue_tail(&q->gso_skb, skb); /* it's still part of the queue */ if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_requeues_inc(q); qdisc_qstats_cpu_backlog_inc(q, skb); qdisc_qstats_cpu_qlen_inc(q); } else { q->qstats.requeues++; qdisc_qstats_backlog_inc(q, skb); q->q.qlen++; } skb = next; } if (lock) { spin_unlock(lock); set_bit(__QDISC_STATE_MISSED, &q->state); } else { __netif_schedule(q); } } static void try_bulk_dequeue_skb(struct Qdisc *q, struct sk_buff *skb, const struct netdev_queue *txq, int *packets) { int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; while (bytelimit > 0) { struct sk_buff *nskb = q->dequeue(q); if (!nskb) break; bytelimit -= nskb->len; /* covers GSO len */ skb->next = nskb; skb = nskb; (*packets)++; /* GSO counts as one pkt */ } skb_mark_not_on_list(skb); } /* This variant of try_bulk_dequeue_skb() makes sure * all skbs in the chain are for the same txq */ static void try_bulk_dequeue_skb_slow(struct Qdisc *q, struct sk_buff *skb, int *packets) { int mapping = skb_get_queue_mapping(skb); struct sk_buff *nskb; int cnt = 0; do { nskb = q->dequeue(q); if (!nskb) break; if (unlikely(skb_get_queue_mapping(nskb) != mapping)) { qdisc_enqueue_skb_bad_txq(q, nskb); break; } skb->next = nskb; skb = nskb; } while (++cnt < 8); (*packets) += cnt; skb_mark_not_on_list(skb); } /* Note that dequeue_skb can possibly return a SKB list (via skb->next). * A requeued skb (via q->gso_skb) can also be a SKB list. */ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, int *packets) { const struct netdev_queue *txq = q->dev_queue; struct sk_buff *skb = NULL; *packets = 1; if (unlikely(!skb_queue_empty(&q->gso_skb))) { spinlock_t *lock = NULL; if (q->flags & TCQ_F_NOLOCK) { lock = qdisc_lock(q); spin_lock(lock); } skb = skb_peek(&q->gso_skb); /* skb may be null if another cpu pulls gso_skb off in between * empty check and lock. 
*/ if (!skb) { if (lock) spin_unlock(lock); goto validate; } /* skb in gso_skb were already validated */ *validate = false; if (xfrm_offload(skb)) *validate = true; /* check the reason of requeuing without tx lock first */ txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { skb = __skb_dequeue(&q->gso_skb); if (qdisc_is_percpu_stats(q)) { qdisc_qstats_cpu_backlog_dec(q, skb); qdisc_qstats_cpu_qlen_dec(q); } else { qdisc_qstats_backlog_dec(q, skb); q->q.qlen--; } } else { skb = NULL; qdisc_maybe_clear_missed(q, txq); } if (lock) spin_unlock(lock); goto trace; } validate: *validate = true; if ((q->flags & TCQ_F_ONETXQUEUE) && netif_xmit_frozen_or_stopped(txq)) { qdisc_maybe_clear_missed(q, txq); return skb; } skb = qdisc_dequeue_skb_bad_txq(q); if (unlikely(skb)) { if (skb == SKB_XOFF_MAGIC) return NULL; goto bulk; } skb = q->dequeue(q); if (skb) { bulk: if (qdisc_may_bulk(q)) try_bulk_dequeue_skb(q, skb, txq, packets); else try_bulk_dequeue_skb_slow(q, skb, packets); } trace: trace_qdisc_dequeue(q, txq, *packets, skb); return skb; } /* * Transmit possibly several skbs, and handle the return status as * required. Owning qdisc running bit guarantees that only one CPU * can execute this function. * * Returns to the caller: * false - hardware queue frozen backoff * true - feel free to send more pkts */ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq, spinlock_t *root_lock, bool validate) { int ret = NETDEV_TX_BUSY; bool again = false; /* And release qdisc */ if (root_lock) spin_unlock(root_lock); /* Note that we validate skb (GSO, checksum, ...) outside of locks */ if (validate) skb = validate_xmit_skb_list(skb, dev, &again); #ifdef CONFIG_XFRM_OFFLOAD if (unlikely(again)) { if (root_lock) spin_lock(root_lock); dev_requeue_skb(skb, q); return false; } #endif if (likely(skb)) { HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_stopped(txq)) skb = dev_hard_start_xmit(skb, dev, txq, &ret); else qdisc_maybe_clear_missed(q, txq); HARD_TX_UNLOCK(dev, txq); } else { if (root_lock) spin_lock(root_lock); return true; } if (root_lock) spin_lock(root_lock); if (!dev_xmit_complete(ret)) { /* Driver returned NETDEV_TX_BUSY - requeue skb */ if (unlikely(ret != NETDEV_TX_BUSY)) net_warn_ratelimited("BUG %s code %d qlen %d\n", dev->name, ret, q->q.qlen); dev_requeue_skb(skb, q); return false; } return true; } /* * NOTE: Called under qdisc_lock(q) with locally disabled BH. * * running seqcount guarantees only one CPU can process * this qdisc at a time. qdisc_lock(q) serializes queue accesses for * this queue. * * netif_tx_lock serializes accesses to device driver. * * qdisc_lock(q) and netif_tx_lock are mutually exclusive, * if one is grabbed, another must be free. * * Note, that this procedure can be called by a watchdog timer * * Returns to the caller: * 0 - queue is empty or throttled. * >0 - queue is not empty. 
* */ static inline bool qdisc_restart(struct Qdisc *q, int *packets) { spinlock_t *root_lock = NULL; struct netdev_queue *txq; struct net_device *dev; struct sk_buff *skb; bool validate; /* Dequeue packet */ skb = dequeue_skb(q, &validate, packets); if (unlikely(!skb)) return false; if (!(q->flags & TCQ_F_NOLOCK)) root_lock = qdisc_lock(q); dev = qdisc_dev(q); txq = skb_get_tx_queue(dev, skb); return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); } void __qdisc_run(struct Qdisc *q) { int quota = READ_ONCE(net_hotdata.dev_tx_weight); int packets; while (qdisc_restart(q, &packets)) { quota -= packets; if (quota <= 0) { if (q->flags & TCQ_F_NOLOCK) set_bit(__QDISC_STATE_MISSED, &q->state); else __netif_schedule(q); break; } } } unsigned long dev_trans_start(struct net_device *dev) { unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start); unsigned long val; unsigned int i; for (i = 1; i < dev->num_tx_queues; i++) { val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start); if (val && time_after(val, res)) res = val; } return res; } EXPORT_SYMBOL(dev_trans_start); static void netif_freeze_queues(struct net_device *dev) { unsigned int i; int cpu; cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); /* We are the only thread of execution doing a * freeze, but we have to grab the _xmit_lock in * order to synchronize with threads which are in * the ->hard_start_xmit() handler and already * checked the frozen bit. */ __netif_tx_lock(txq, cpu); set_bit(__QUEUE_STATE_FROZEN, &txq->state); __netif_tx_unlock(txq); } } void netif_tx_lock(struct net_device *dev) { spin_lock(&dev->tx_global_lock); netif_freeze_queues(dev); } EXPORT_SYMBOL(netif_tx_lock); static void netif_unfreeze_queues(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); /* No need to grab the _xmit_lock here. If the * queue is not stopped for another reason, we * force a schedule. */ clear_bit(__QUEUE_STATE_FROZEN, &txq->state); netif_schedule_queue(txq); } } void netif_tx_unlock(struct net_device *dev) { netif_unfreeze_queues(dev); spin_unlock(&dev->tx_global_lock); } EXPORT_SYMBOL(netif_tx_unlock); static void dev_watchdog(struct timer_list *t) { struct net_device *dev = timer_container_of(dev, t, watchdog_timer); bool release = true; spin_lock(&dev->tx_global_lock); if (!qdisc_tx_is_noop(dev)) { if (netif_device_present(dev) && netif_running(dev) && netif_carrier_ok(dev)) { unsigned int timedout_ms = 0; unsigned int i; unsigned long trans_start; unsigned long oldest_start = jiffies; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq; txq = netdev_get_tx_queue(dev, i); if (!netif_xmit_stopped(txq)) continue; /* Paired with WRITE_ONCE() + smp_mb...() in * netdev_tx_sent_queue() and netif_tx_stop_queue(). 
*/ smp_mb(); trans_start = READ_ONCE(txq->trans_start); if (time_after(jiffies, trans_start + dev->watchdog_timeo)) { timedout_ms = jiffies_to_msecs(jiffies - trans_start); atomic_long_inc(&txq->trans_timeout); break; } if (time_after(oldest_start, trans_start)) oldest_start = trans_start; } if (unlikely(timedout_ms)) { trace_net_dev_xmit_timeout(dev, i); netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n", raw_smp_processor_id(), i, timedout_ms); netif_freeze_queues(dev); dev->netdev_ops->ndo_tx_timeout(dev, i); netif_unfreeze_queues(dev); } if (!mod_timer(&dev->watchdog_timer, round_jiffies(oldest_start + dev->watchdog_timeo))) release = false; } } spin_unlock(&dev->tx_global_lock); if (release) netdev_put(dev, &dev->watchdog_dev_tracker); } void netdev_watchdog_up(struct net_device *dev) { if (!dev->netdev_ops->ndo_tx_timeout) return; if (dev->watchdog_timeo <= 0) dev->watchdog_timeo = 5*HZ; if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) netdev_hold(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(netdev_watchdog_up); static void netdev_watchdog_down(struct net_device *dev) { netif_tx_lock_bh(dev); if (timer_delete(&dev->watchdog_timer)) netdev_put(dev, &dev->watchdog_dev_tracker); netif_tx_unlock_bh(dev); } /** * netif_carrier_on - set carrier * @dev: network device * * Device has detected acquisition of carrier. */ void netif_carrier_on(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { if (dev->reg_state == NETREG_UNINITIALIZED) return; atomic_inc(&dev->carrier_up_count); linkwatch_fire_event(dev); if (netif_running(dev)) netdev_watchdog_up(dev); } } EXPORT_SYMBOL(netif_carrier_on); /** * netif_carrier_off - clear carrier * @dev: network device * * Device has detected loss of carrier. */ void netif_carrier_off(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) { if (dev->reg_state == NETREG_UNINITIALIZED) return; atomic_inc(&dev->carrier_down_count); linkwatch_fire_event(dev); } } EXPORT_SYMBOL(netif_carrier_off); /** * netif_carrier_event - report carrier state event * @dev: network device * * Device has detected a carrier event but the carrier state wasn't changed. * Use in drivers when querying carrier state asynchronously, to avoid missing * events (link flaps) if link recovers before it's queried. */ void netif_carrier_event(struct net_device *dev) { if (dev->reg_state == NETREG_UNINITIALIZED) return; atomic_inc(&dev->carrier_up_count); atomic_inc(&dev->carrier_down_count); linkwatch_fire_event(dev); } EXPORT_SYMBOL_GPL(netif_carrier_event); /* "NOOP" scheduler: the best scheduler, recommended for all interfaces under all circumstances. It is difficult to invent anything faster or cheaper. 
*/ static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { dev_core_stats_tx_dropped_inc(skb->dev); __qdisc_drop(skb, to_free); return NET_XMIT_CN; } static struct sk_buff *noop_dequeue(struct Qdisc *qdisc) { return NULL; } struct Qdisc_ops noop_qdisc_ops __read_mostly = { .id = "noop", .priv_size = 0, .enqueue = noop_enqueue, .dequeue = noop_dequeue, .peek = noop_dequeue, .owner = THIS_MODULE, }; static struct netdev_queue noop_netdev_queue = { RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc), RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc), }; struct Qdisc noop_qdisc = { .enqueue = noop_enqueue, .dequeue = noop_dequeue, .flags = TCQ_F_BUILTIN, .ops = &noop_qdisc_ops, .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), .dev_queue = &noop_netdev_queue, .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), .gso_skb = { .next = (struct sk_buff *)&noop_qdisc.gso_skb, .prev = (struct sk_buff *)&noop_qdisc.gso_skb, .qlen = 0, .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock), }, .skb_bad_txq = { .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq, .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq, .qlen = 0, .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock), }, .owner = -1, }; EXPORT_SYMBOL(noop_qdisc); static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt, struct netlink_ext_ack *extack) { /* register_qdisc() assigns a default of noop_enqueue if unset, * but __dev_queue_xmit() treats noqueue only as such * if this is NULL - so clear it here. */ qdisc->enqueue = NULL; return 0; } struct Qdisc_ops noqueue_qdisc_ops __read_mostly = { .id = "noqueue", .priv_size = 0, .init = noqueue_init, .enqueue = noop_enqueue, .dequeue = noop_dequeue, .peek = noop_dequeue, .owner = THIS_MODULE, }; const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 }; EXPORT_SYMBOL(sch_default_prio2band); /* 3-band FIFO queue: old style, but should be a bit faster than generic prio+fifo combination. 
*/ #define PFIFO_FAST_BANDS 3 /* * Private data for a pfifo_fast scheduler containing: * - rings for priority bands */ struct pfifo_fast_priv { struct skb_array q[PFIFO_FAST_BANDS]; }; static inline struct skb_array *band2list(struct pfifo_fast_priv *priv, int band) { return &priv->q[band]; } static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX]; struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct skb_array *q = band2list(priv, band); unsigned int pkt_len = qdisc_pkt_len(skb); int err; err = skb_array_produce(q, skb); if (unlikely(err)) { tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT); if (qdisc_is_percpu_stats(qdisc)) return qdisc_drop_cpu(skb, qdisc, to_free); else return qdisc_drop(skb, qdisc, to_free); } qdisc_update_stats_at_enqueue(qdisc, pkt_len); return NET_XMIT_SUCCESS; } static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) { struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct sk_buff *skb = NULL; bool need_retry = true; int band; retry: for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { struct skb_array *q = band2list(priv, band); if (__skb_array_empty(q)) continue; skb = __skb_array_consume(q); } if (likely(skb)) { qdisc_update_stats_at_dequeue(qdisc, skb); } else if (need_retry && READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) { /* Delay clearing the STATE_MISSED here to reduce * the overhead of the second spin_trylock() in * qdisc_run_begin() and __netif_schedule() calling * in qdisc_run_end(). */ clear_bit(__QDISC_STATE_MISSED, &qdisc->state); clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); /* Make sure dequeuing happens after clearing * STATE_MISSED. */ smp_mb__after_atomic(); need_retry = false; goto retry; } return skb; } static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc) { struct pfifo_fast_priv *priv = qdisc_priv(qdisc); struct sk_buff *skb = NULL; int band; for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { struct skb_array *q = band2list(priv, band); skb = __skb_array_peek(q); } return skb; } static void pfifo_fast_reset(struct Qdisc *qdisc) { int i, band; struct pfifo_fast_priv *priv = qdisc_priv(qdisc); for (band = 0; band < PFIFO_FAST_BANDS; band++) { struct skb_array *q = band2list(priv, band); struct sk_buff *skb; /* NULL ring is possible if destroy path is due to a failed * skb_array_init() in pfifo_fast_init() case. 
*/ if (!q->ring.queue) continue; while ((skb = __skb_array_consume(q)) != NULL) kfree_skb(skb); } if (qdisc_is_percpu_stats(qdisc)) { for_each_possible_cpu(i) { struct gnet_stats_queue *q; q = per_cpu_ptr(qdisc->cpu_qstats, i); q->backlog = 0; q->qlen = 0; } } } static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) { struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1); if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) goto nla_put_failure; return skb->len; nla_put_failure: return -1; } static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt, struct netlink_ext_ack *extack) { unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len; struct pfifo_fast_priv *priv = qdisc_priv(qdisc); int prio; /* guard against zero length rings */ if (!qlen) return -EINVAL; for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { struct skb_array *q = band2list(priv, prio); int err; err = skb_array_init(q, qlen, GFP_KERNEL); if (err) return -ENOMEM; } /* Can by-pass the queue discipline */ qdisc->flags |= TCQ_F_CAN_BYPASS; return 0; } static void pfifo_fast_destroy(struct Qdisc *sch) { struct pfifo_fast_priv *priv = qdisc_priv(sch); int prio; for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { struct skb_array *q = band2list(priv, prio); /* NULL ring is possible if destroy path is due to a failed * skb_array_init() in pfifo_fast_init() case. */ if (!q->ring.queue) continue; /* Destroy ring but no need to kfree_skb because a call to * pfifo_fast_reset() has already done that work. */ ptr_ring_cleanup(&q->ring, NULL); } } static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch, unsigned int new_len) { struct pfifo_fast_priv *priv = qdisc_priv(sch); struct skb_array *bands[PFIFO_FAST_BANDS]; int prio; for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { struct skb_array *q = band2list(priv, prio); bands[prio] = q; } return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len, GFP_KERNEL); } struct Qdisc_ops pfifo_fast_ops __read_mostly = { .id = "pfifo_fast", .priv_size = sizeof(struct pfifo_fast_priv), .enqueue = pfifo_fast_enqueue, .dequeue = pfifo_fast_dequeue, .peek = pfifo_fast_peek, .init = pfifo_fast_init, .destroy = pfifo_fast_destroy, .reset = pfifo_fast_reset, .dump = pfifo_fast_dump, .change_tx_queue_len = pfifo_fast_change_tx_queue_len, .owner = THIS_MODULE, .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS, }; EXPORT_SYMBOL(pfifo_fast_ops); static struct lock_class_key qdisc_tx_busylock; struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, struct netlink_ext_ack *extack) { struct Qdisc *sch; unsigned int size = sizeof(*sch) + ops->priv_size; int err = -ENOBUFS; struct net_device *dev; if (!dev_queue) { NL_SET_ERR_MSG(extack, "No device queue given"); err = -EINVAL; goto errout; } dev = dev_queue->dev; sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue)); if (!sch) goto errout; __skb_queue_head_init(&sch->gso_skb); __skb_queue_head_init(&sch->skb_bad_txq); gnet_stats_basic_sync_init(&sch->bstats); lockdep_register_key(&sch->root_lock_key); spin_lock_init(&sch->q.lock); lockdep_set_class(&sch->q.lock, &sch->root_lock_key); if (ops->static_flags & TCQ_F_CPUSTATS) { sch->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync); if (!sch->cpu_bstats) goto errout1; sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); if (!sch->cpu_qstats) { free_percpu(sch->cpu_bstats); goto errout1; } } spin_lock_init(&sch->busylock); lockdep_set_class(&sch->busylock, 
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); /* seqlock has the same scope of busylock, for NOLOCK qdisc */ spin_lock_init(&sch->seqlock); lockdep_set_class(&sch->seqlock, dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); sch->ops = ops; sch->flags = ops->static_flags; sch->enqueue = ops->enqueue; sch->dequeue = ops->dequeue; sch->dev_queue = dev_queue; sch->owner = -1; netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL); refcount_set(&sch->refcnt, 1); return sch; errout1: lockdep_unregister_key(&sch->root_lock_key); kfree(sch); errout: return ERR_PTR(err); } struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, unsigned int parentid, struct netlink_ext_ack *extack) { struct Qdisc *sch; if (!bpf_try_module_get(ops, ops->owner)) { NL_SET_ERR_MSG(extack, "Failed to increase module reference counter"); return NULL; } sch = qdisc_alloc(dev_queue, ops, extack); if (IS_ERR(sch)) { bpf_module_put(ops, ops->owner); return NULL; } sch->parent = parentid; if (!ops->init || ops->init(sch, NULL, extack) == 0) { trace_qdisc_create(ops, dev_queue->dev, parentid); return sch; } qdisc_put(sch); return NULL; } EXPORT_SYMBOL(qdisc_create_dflt); /* Under qdisc_lock(qdisc) and BH! */ void qdisc_reset(struct Qdisc *qdisc) { const struct Qdisc_ops *ops = qdisc->ops; trace_qdisc_reset(qdisc); if (ops->reset) ops->reset(qdisc); __skb_queue_purge(&qdisc->gso_skb); __skb_queue_purge(&qdisc->skb_bad_txq); qdisc->q.qlen = 0; qdisc->qstats.backlog = 0; } EXPORT_SYMBOL(qdisc_reset); void qdisc_free(struct Qdisc *qdisc) { if (qdisc_is_percpu_stats(qdisc)) { free_percpu(qdisc->cpu_bstats); free_percpu(qdisc->cpu_qstats); } kfree(qdisc); } static void qdisc_free_cb(struct rcu_head *head) { struct Qdisc *q = container_of(head, struct Qdisc, rcu); qdisc_free(q); } static void __qdisc_destroy(struct Qdisc *qdisc) { const struct Qdisc_ops *ops = qdisc->ops; struct net_device *dev = qdisc_dev(qdisc); #ifdef CONFIG_NET_SCHED qdisc_hash_del(qdisc); qdisc_put_stab(rtnl_dereference(qdisc->stab)); #endif gen_kill_estimator(&qdisc->rate_est); qdisc_reset(qdisc); if (ops->destroy) ops->destroy(qdisc); lockdep_unregister_key(&qdisc->root_lock_key); bpf_module_put(ops, ops->owner); netdev_put(dev, &qdisc->dev_tracker); trace_qdisc_destroy(qdisc); call_rcu(&qdisc->rcu, qdisc_free_cb); } void qdisc_destroy(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_BUILTIN) return; __qdisc_destroy(qdisc); } void qdisc_put(struct Qdisc *qdisc) { if (!qdisc) return; if (qdisc->flags & TCQ_F_BUILTIN || !refcount_dec_and_test(&qdisc->refcnt)) return; __qdisc_destroy(qdisc); } EXPORT_SYMBOL(qdisc_put); /* Version of qdisc_put() that is called with rtnl mutex unlocked. * Intended to be used as optimization, this function only takes rtnl lock if * qdisc reference counter reached zero. */ void qdisc_put_unlocked(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_BUILTIN || !refcount_dec_and_rtnl_lock(&qdisc->refcnt)) return; __qdisc_destroy(qdisc); rtnl_unlock(); } EXPORT_SYMBOL(qdisc_put_unlocked); /* Attach toplevel qdisc to device queue. */ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *qdisc) { struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping); spinlock_t *root_lock; root_lock = qdisc_lock(oqdisc); spin_lock_bh(root_lock); /* ... 
and graft new one */ if (qdisc == NULL) qdisc = &noop_qdisc; rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); spin_unlock_bh(root_lock); return oqdisc; } EXPORT_SYMBOL(dev_graft_qdisc); static void shutdown_scheduler_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_qdisc_default) { struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); struct Qdisc *qdisc_default = _qdisc_default; if (qdisc) { rcu_assign_pointer(dev_queue->qdisc, qdisc_default); rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default); qdisc_put(qdisc); } } static void attach_one_default_qdisc(struct net_device *dev, struct netdev_queue *dev_queue, void *_unused) { struct Qdisc *qdisc; const struct Qdisc_ops *ops = default_qdisc_ops; if (dev->priv_flags & IFF_NO_QUEUE) ops = &noqueue_qdisc_ops; else if (dev->type == ARPHRD_CAN) ops = &pfifo_fast_ops; qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL); if (!qdisc) return; if (!netif_is_multiqueue(dev)) qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); } static void attach_default_qdiscs(struct net_device *dev) { struct netdev_queue *txq; struct Qdisc *qdisc; txq = netdev_get_tx_queue(dev, 0); if (!netif_is_multiqueue(dev) || dev->priv_flags & IFF_NO_QUEUE) { netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); qdisc = rtnl_dereference(txq->qdisc_sleeping); rcu_assign_pointer(dev->qdisc, qdisc); qdisc_refcount_inc(qdisc); } else { qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); if (qdisc) { rcu_assign_pointer(dev->qdisc, qdisc); qdisc->ops->attach(qdisc); } } qdisc = rtnl_dereference(dev->qdisc); /* Detect a failed default qdisc setup/init and fall back to "noqueue" */ if (qdisc == &noop_qdisc) { netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", default_qdisc_ops->id, noqueue_qdisc_ops.id); netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); dev->priv_flags |= IFF_NO_QUEUE; netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); qdisc = rtnl_dereference(txq->qdisc_sleeping); rcu_assign_pointer(dev->qdisc, qdisc); qdisc_refcount_inc(qdisc); dev->priv_flags ^= IFF_NO_QUEUE; } #ifdef CONFIG_NET_SCHED if (qdisc != &noop_qdisc) qdisc_hash_add(qdisc, false); #endif } static void transition_one_qdisc(struct net_device *dev, struct netdev_queue *dev_queue, void *_need_watchdog) { struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); int *need_watchdog_p = _need_watchdog; if (!(new_qdisc->flags & TCQ_F_BUILTIN)) clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); rcu_assign_pointer(dev_queue->qdisc, new_qdisc); if (need_watchdog_p) { WRITE_ONCE(dev_queue->trans_start, 0); *need_watchdog_p = 1; } } void dev_activate(struct net_device *dev) { int need_watchdog; /* No queueing discipline is attached to the device; * create a default one for devices which need queueing, * and noqueue_qdisc for virtual interfaces */ if (rtnl_dereference(dev->qdisc) == &noop_qdisc) attach_default_qdiscs(dev); if (!netif_carrier_ok(dev)) /* Delay activation until next carrier-on event */ return; need_watchdog = 0; netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog); if (dev_ingress_queue(dev)) transition_one_qdisc(dev, dev_ingress_queue(dev), NULL); if (need_watchdog) { netif_trans_update(dev); netdev_watchdog_up(dev); } } EXPORT_SYMBOL(dev_activate); static void qdisc_deactivate(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_BUILTIN) return;
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); } static void dev_deactivate_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_sync_needed) { bool *sync_needed = _sync_needed; struct Qdisc *qdisc; qdisc = rtnl_dereference(dev_queue->qdisc); if (qdisc) { if (qdisc->enqueue) *sync_needed = true; qdisc_deactivate(qdisc); rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); } } static void dev_reset_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_unused) { struct Qdisc *qdisc; bool nolock; qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); if (!qdisc) return; nolock = qdisc->flags & TCQ_F_NOLOCK; if (nolock) spin_lock_bh(&qdisc->seqlock); spin_lock_bh(qdisc_lock(qdisc)); qdisc_reset(qdisc); spin_unlock_bh(qdisc_lock(qdisc)); if (nolock) { clear_bit(__QDISC_STATE_MISSED, &qdisc->state); clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); spin_unlock_bh(&qdisc->seqlock); } } static bool some_qdisc_is_busy(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *dev_queue; spinlock_t *root_lock; struct Qdisc *q; int val; dev_queue = netdev_get_tx_queue(dev, i); q = rtnl_dereference(dev_queue->qdisc_sleeping); root_lock = qdisc_lock(q); spin_lock_bh(root_lock); val = (qdisc_is_running(q) || test_bit(__QDISC_STATE_SCHED, &q->state)); spin_unlock_bh(root_lock); if (val) return true; } return false; } /** * dev_deactivate_many - deactivate transmissions on several devices * @head: list of devices to deactivate * * This function returns only when all outstanding transmissions * have completed, unless all devices are in dismantle phase. */ void dev_deactivate_many(struct list_head *head) { bool sync_needed = false; struct net_device *dev; list_for_each_entry(dev, head, close_list) { netdev_for_each_tx_queue(dev, dev_deactivate_queue, &sync_needed); if (dev_ingress_queue(dev)) dev_deactivate_queue(dev, dev_ingress_queue(dev), &sync_needed); netdev_watchdog_down(dev); } /* Wait for outstanding qdisc enqueuing calls. */ if (sync_needed) synchronize_net(); list_for_each_entry(dev, head, close_list) { netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); if (dev_ingress_queue(dev)) dev_reset_queue(dev, dev_ingress_queue(dev), NULL); } /* Wait for outstanding qdisc_run calls. */ list_for_each_entry(dev, head, close_list) { while (some_qdisc_is_busy(dev)) { /* wait_event() would avoid this sleep-loop but would * require expensive checks in the fast paths of packet * processing which isn't worth it. 
*/ schedule_timeout_uninterruptible(1); } } } void dev_deactivate(struct net_device *dev) { LIST_HEAD(single); list_add(&dev->close_list, &single); dev_deactivate_many(&single); list_del(&single); } EXPORT_SYMBOL(dev_deactivate); static int qdisc_change_tx_queue_len(struct net_device *dev, struct netdev_queue *dev_queue) { struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); const struct Qdisc_ops *ops = qdisc->ops; if (ops->change_tx_queue_len) return ops->change_tx_queue_len(qdisc, dev->tx_queue_len); return 0; } void dev_qdisc_change_real_num_tx(struct net_device *dev, unsigned int new_real_tx) { struct Qdisc *qdisc = rtnl_dereference(dev->qdisc); if (qdisc->ops->change_real_num_tx) qdisc->ops->change_real_num_tx(qdisc, new_real_tx); } void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx) { #ifdef CONFIG_NET_SCHED struct net_device *dev = qdisc_dev(sch); struct Qdisc *qdisc; unsigned int i; for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); /* Only update the default qdiscs we created; * qdiscs with handles are always hashed. */ if (qdisc != &noop_qdisc && !qdisc->handle) qdisc_hash_del(qdisc); } for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); if (qdisc != &noop_qdisc && !qdisc->handle) qdisc_hash_add(qdisc, false); } #endif } EXPORT_SYMBOL(mq_change_real_num_tx); int dev_qdisc_change_tx_queue_len(struct net_device *dev) { bool up = dev->flags & IFF_UP; unsigned int i; int ret = 0; if (up) dev_deactivate(dev); for (i = 0; i < dev->num_tx_queues; i++) { ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]); /* TODO: revert changes on a partial failure */ if (ret) break; } if (up) dev_activate(dev); return ret; } static void dev_init_scheduler_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_qdisc) { struct Qdisc *qdisc = _qdisc; rcu_assign_pointer(dev_queue->qdisc, qdisc); rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); } void dev_init_scheduler(struct net_device *dev) { rcu_assign_pointer(dev->qdisc, &noop_qdisc); netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); timer_setup(&dev->watchdog_timer, dev_watchdog, 0); } void dev_shutdown(struct net_device *dev) { netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); qdisc_put(rtnl_dereference(dev->qdisc)); rcu_assign_pointer(dev->qdisc, &noop_qdisc); WARN_ON(timer_pending(&dev->watchdog_timer)); } /** * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division * @rate: Rate to compute reciprocal division values of * @mult: Multiplier for reciprocal division * @shift: Shift for reciprocal division * * The multiplier and shift for reciprocal division by rate are stored * in mult and shift. * * The idea here is to replace a divide with a reciprocal one * in the fast path (a reciprocal divide is a multiply and a shift) * * The normal formula would be: * time_in_ns = (NSEC_PER_SEC * len) / rate_bps * * We compute mult/shift to use instead: * time_in_ns = (len * mult) >> shift; * * We try to get the highest possible mult value for accuracy, * but have to make sure no overflows will ever happen. * * reciprocal_value() is not used here because it doesn't handle 64-bit values.
*/ static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift) { u64 factor = NSEC_PER_SEC; *mult = 1; *shift = 0; if (!rate) return; for (;;) { *mult = div64_u64(factor, rate); if (*mult & (1U << 31) || factor & (1ULL << 63)) break; factor <<= 1; (*shift)++; } } void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf, u64 rate64) { memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; r->mpu = conf->mpu; r->rate_bytes_ps = max_t(u64, conf->rate, rate64); r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift); } EXPORT_SYMBOL(psched_ratecfg_precompute); void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64) { r->rate_pkts_ps = pktrate64; psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift); } EXPORT_SYMBOL(psched_ppscfg_precompute); void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, struct tcf_proto *tp_head) { /* Protected with chain0->filter_chain_lock. * Can't access chain directly because tp_head can be NULL. */ struct mini_Qdisc *miniq_old = rcu_dereference_protected(*miniqp->p_miniq, 1); struct mini_Qdisc *miniq; if (!tp_head) { RCU_INIT_POINTER(*miniqp->p_miniq, NULL); } else { miniq = miniq_old != &miniqp->miniq1 ? &miniqp->miniq1 : &miniqp->miniq2; /* We need to make sure that readers won't see the miniq * we are about to modify. So ensure that at least one RCU * grace period has elapsed since the miniq was made * inactive. */ if (IS_ENABLED(CONFIG_PREEMPT_RT)) cond_synchronize_rcu(miniq->rcu_state); else if (!poll_state_synchronize_rcu(miniq->rcu_state)) synchronize_rcu_expedited(); miniq->filter_list = tp_head; rcu_assign_pointer(*miniqp->p_miniq, miniq); } if (miniq_old) /* This is the counterpart of the RCU sync above. We need to * block potential new users of miniq_old until all readers * have stopped seeing it. */ miniq_old->rcu_state = start_poll_synchronize_rcu(); } EXPORT_SYMBOL(mini_qdisc_pair_swap); void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, struct tcf_block *block) { miniqp->miniq1.block = block; miniqp->miniq2.block = block; } EXPORT_SYMBOL(mini_qdisc_pair_block_init); void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, struct mini_Qdisc __rcu **p_miniq) { miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats; miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats; miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats; miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats; miniqp->miniq1.rcu_state = get_state_synchronize_rcu(); miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state; miniqp->p_miniq = p_miniq; } EXPORT_SYMBOL(mini_qdisc_pair_init);
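The mult/shift pre-computation documented above is easy to sanity-check in isolation. Below is a minimal user-space sketch of the same loop; rate_precompute() and the sample rate are hypothetical stand-ins for illustration, not kernel API. It shows that for a 1 Gbit/s rate the multiply-and-shift fast path reproduces the exact division result for a full-size Ethernet frame.

/* Stand-alone sketch of the reciprocal-divide trick used by
 * psched_ratecfg_precompute__(). rate_precompute() is a hypothetical
 * user-space copy of that loop, not a kernel interface.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static void rate_precompute(uint64_t rate, uint32_t *mult, uint8_t *shift)
{
	uint64_t factor = NSEC_PER_SEC;

	*mult = 1;
	*shift = 0;
	if (!rate)
		return;
	for (;;) {
		*mult = factor / rate;	/* div64_u64() in the kernel */
		if (*mult & (1U << 31) || factor & (1ULL << 63))
			break;		/* mult or factor can grow no further */
		factor <<= 1;
		(*shift)++;
	}
}

int main(void)
{
	uint64_t rate = 125000000;	/* 1 Gbit/s expressed in bytes/s */
	uint64_t len = 1514;		/* full-size Ethernet frame */
	uint32_t mult;
	uint8_t shift;

	rate_precompute(rate, &mult, &shift);
	printf("mult=%u shift=%u\n", mult, shift);
	printf("fast path: %llu ns, exact: %llu ns\n",
	       (unsigned long long)((len * mult) >> shift),
	       (unsigned long long)(NSEC_PER_SEC * len / rate));
	return 0;
}

For this rate the loop settles on mult = 2^31 and shift = 28, so (len * mult) >> shift equals len * 8 and both printouts read 12112 ns. Because mult is a floored quotient, rates that do not divide NSEC_PER_SEC evenly yield a result at or slightly below the true transmission time, which is the accuracy trade-off the comment above describes.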
// SPDX-License-Identifier: GPL-2.0-only /* * Xtables module to match the process control group. * * Might be used to implement individual "per-application" firewall * policies in contrast to global policies based on control groups. * Matching is based upon processes tagged to net_cls' classid marker. * * (C) 2013 Daniel Borkmann <dborkman@redhat.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/skbuff.h> #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_cgroup.h> #include <net/sock.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>"); MODULE_DESCRIPTION("Xtables: process control group matching"); MODULE_ALIAS("ipt_cgroup"); MODULE_ALIAS("ip6t_cgroup"); #define NET_CLS_CLASSID_INVALID_MSG "xt_cgroup: classid invalid without net_cls cgroups\n" static int cgroup_mt_check_v0(const struct xt_mtchk_param *par) { struct xt_cgroup_info_v0 *info = par->matchinfo; if (info->invert & ~1) return -EINVAL; if (!IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)) { pr_info(NET_CLS_CLASSID_INVALID_MSG); return -EINVAL; } return 0; } static int cgroup_mt_check_v1(const struct xt_mtchk_param *par) { struct xt_cgroup_info_v1 *info = par->matchinfo; struct cgroup *cgrp; if ((info->invert_path & ~1) || (info->invert_classid & ~1)) return -EINVAL; if (!info->has_path && !info->has_classid) { pr_info("xt_cgroup: no path or classid specified\n"); return -EINVAL; } if (info->has_path && info->has_classid) { pr_info_ratelimited("path and classid specified\n"); return -EINVAL; } if (info->has_classid && !IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)) { pr_info(NET_CLS_CLASSID_INVALID_MSG); return -EINVAL; } info->priv = NULL; if (info->has_path) { cgrp = cgroup_get_from_path(info->path); if (IS_ERR(cgrp)) { pr_info_ratelimited("invalid path, errno=%ld\n", PTR_ERR(cgrp)); return -EINVAL; } info->priv = cgrp; } return 0; } static int cgroup_mt_check_v2(const struct xt_mtchk_param *par) { struct xt_cgroup_info_v2 *info = par->matchinfo; struct cgroup *cgrp; if ((info->invert_path & ~1) || (info->invert_classid & ~1)) return -EINVAL; if (!info->has_path && !info->has_classid) { pr_info("xt_cgroup: no path or classid specified\n"); return -EINVAL; } if (info->has_path && info->has_classid) { pr_info_ratelimited("path and classid specified\n"); return -EINVAL; } if (info->has_classid && !IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)) { pr_info(NET_CLS_CLASSID_INVALID_MSG); return -EINVAL; } info->priv = NULL; if (info->has_path) { cgrp = cgroup_get_from_path(info->path); if (IS_ERR(cgrp)) { pr_info_ratelimited("invalid path, errno=%ld\n", PTR_ERR(cgrp)); return -EINVAL; } info->priv
= cgrp; } return 0; } static bool cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par) { #ifdef CONFIG_CGROUP_NET_CLASSID const struct xt_cgroup_info_v0 *info = par->matchinfo; struct sock *sk = skb->sk; if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk))) return false; return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^ info->invert; #endif return false; } static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_cgroup_info_v1 *info = par->matchinfo; struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data; struct cgroup *ancestor = info->priv; struct sock *sk = skb->sk; if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk))) return false; if (ancestor) return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^ info->invert_path; #ifdef CONFIG_CGROUP_NET_CLASSID else return (info->classid == sock_cgroup_classid(skcd)) ^ info->invert_classid; #endif return false; } static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_cgroup_info_v2 *info = par->matchinfo; struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data; struct cgroup *ancestor = info->priv; struct sock *sk = skb->sk; if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk))) return false; if (ancestor) return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^ info->invert_path; #ifdef CONFIG_CGROUP_NET_CLASSID else return (info->classid == sock_cgroup_classid(skcd)) ^ info->invert_classid; #endif return false; } static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par) { struct xt_cgroup_info_v1 *info = par->matchinfo; if (info->priv) cgroup_put(info->priv); } static void cgroup_mt_destroy_v2(const struct xt_mtdtor_param *par) { struct xt_cgroup_info_v2 *info = par->matchinfo; if (info->priv) cgroup_put(info->priv); } static struct xt_match cgroup_mt_reg[] __read_mostly = { { .name = "cgroup", .revision = 0, .family = NFPROTO_UNSPEC, .checkentry = cgroup_mt_check_v0, .match = cgroup_mt_v0, .matchsize = sizeof(struct xt_cgroup_info_v0), .me = THIS_MODULE, .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), }, { .name = "cgroup", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = cgroup_mt_check_v1, .match = cgroup_mt_v1, .matchsize = sizeof(struct xt_cgroup_info_v1), .usersize = offsetof(struct xt_cgroup_info_v1, priv), .destroy = cgroup_mt_destroy_v1, .me = THIS_MODULE, .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), }, { .name = "cgroup", .revision = 2, .family = NFPROTO_UNSPEC, .checkentry = cgroup_mt_check_v2, .match = cgroup_mt_v2, .matchsize = sizeof(struct xt_cgroup_info_v2), .usersize = offsetof(struct xt_cgroup_info_v2, priv), .destroy = cgroup_mt_destroy_v2, .me = THIS_MODULE, .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), }, }; static int __init cgroup_mt_init(void) { return xt_register_matches(cgroup_mt_reg, ARRAY_SIZE(cgroup_mt_reg)); } static void __exit cgroup_mt_exit(void) { xt_unregister_matches(cgroup_mt_reg, ARRAY_SIZE(cgroup_mt_reg)); } module_init(cgroup_mt_init); module_exit(cgroup_mt_exit);
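One pattern worth noting in the match functions above is how a user-supplied invert flag is validated once at checkentry time (the "invert & ~1" tests) and then applied with a plain XOR on the raw comparison result. The sketch below illustrates why both halves matter; classid_match() and the sample values are hypothetical, purely illustrative, not kernel API.

/* Illustrates the validate-then-XOR invert convention used by xt_cgroup:
 * invert must be exactly 0 or 1, and the verdict is (raw match) ^ invert.
 * Hypothetical user-space code, not kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool classid_match(uint32_t sk_classid, uint32_t want, uint8_t invert)
{
	/* XOR with 1 flips the verdict; XOR with 0 leaves it unchanged */
	return (sk_classid == want) ^ invert;
}

int main(void)
{
	uint8_t bad_invert = 2;

	/* checkentry-style validation: any bit above bit 0 is invalid */
	if (bad_invert & ~1)
		printf("rejected: invert must be 0 or 1 (got %u)\n", bad_invert);

	printf("plain:    %d\n", classid_match(0x100001, 0x100001, 0)); /* 1 */
	printf("inverted: %d\n", classid_match(0x100001, 0x100001, 1)); /* 0 */
	return 0;
}

Rejecting invert values other than 0 or 1 up front matters because the XOR only behaves as a clean negation when invert is a strict boolean; a stray value such as 2 would corrupt the verdict instead of inverting it.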
// SPDX-License-Identifier: GPL-2.0-only /* * V4L2 sub-device * * Copyright (C) 2010 Nokia Corporation * * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> */ #include <linux/export.h> #include <linux/ioctl.h> #include <linux/leds.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/overflow.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include <linux/version.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-fh.h> #include <media/v4l2-ioctl.h> /** * struct v4l2_subdev_stream_config - Used for storing stream configuration. * * @pad: pad number * @stream: stream number * @enabled: has the stream been enabled with v4l2_subdev_enable_streams() * @fmt: &struct v4l2_mbus_framefmt * @crop: &struct v4l2_rect to be used for crop * @compose: &struct v4l2_rect to be used for compose * @interval: frame interval * * This structure stores configuration for a stream. */ struct v4l2_subdev_stream_config { u32 pad; u32 stream; bool enabled; struct v4l2_mbus_framefmt fmt; struct v4l2_rect crop; struct v4l2_rect compose; struct v4l2_fract interval; }; #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) /* * The Streams API is an experimental feature. To use the Streams API, set * 'v4l2_subdev_enable_streams_api' to 1 below. */ static bool v4l2_subdev_enable_streams_api; #endif /* * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set * of streams.
* * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX * restricts the total number of streams in a pad, although the stream ID is * not restricted. */ #define V4L2_SUBDEV_MAX_STREAM_ID 63 #include "v4l2-subdev-priv.h" #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd) { struct v4l2_subdev_state *state; static struct lock_class_key key; state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key); if (IS_ERR(state)) return PTR_ERR(state); fh->state = state; return 0; } static void subdev_fh_free(struct v4l2_subdev_fh *fh) { __v4l2_subdev_state_free(fh->state); fh->state = NULL; } static int subdev_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_subdev_fh *subdev_fh; int ret; subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL); if (subdev_fh == NULL) return -ENOMEM; ret = subdev_fh_init(subdev_fh, sd); if (ret) { kfree(subdev_fh); return ret; } v4l2_fh_init(&subdev_fh->vfh, vdev); v4l2_fh_add(&subdev_fh->vfh, file); if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) { struct module *owner; owner = sd->entity.graph_obj.mdev->dev->driver->owner; if (!try_module_get(owner)) { ret = -EBUSY; goto err; } subdev_fh->owner = owner; } if (sd->internal_ops && sd->internal_ops->open) { ret = sd->internal_ops->open(sd, subdev_fh); if (ret < 0) goto err; } return 0; err: module_put(subdev_fh->owner); v4l2_fh_del(&subdev_fh->vfh, file); v4l2_fh_exit(&subdev_fh->vfh); subdev_fh_free(subdev_fh); kfree(subdev_fh); return ret; } static int subdev_close(struct file *file) { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_fh *vfh = file_to_v4l2_fh(file); struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); if (sd->internal_ops && sd->internal_ops->close) sd->internal_ops->close(sd, subdev_fh); module_put(subdev_fh->owner); v4l2_fh_del(vfh, file); v4l2_fh_exit(vfh); subdev_fh_free(subdev_fh); kfree(subdev_fh); return 0; } #else /* CONFIG_VIDEO_V4L2_SUBDEV_API */ static int subdev_open(struct file *file) { return -ENODEV; } static int subdev_close(struct file *file) { return -ENODEV; } #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd) { #if IS_REACHABLE(CONFIG_LEDS_CLASS) if (!IS_ERR_OR_NULL(sd->privacy_led)) led_set_brightness(sd->privacy_led, sd->privacy_led->max_brightness); #endif } static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd) { #if IS_REACHABLE(CONFIG_LEDS_CLASS) if (!IS_ERR_OR_NULL(sd->privacy_led)) led_set_brightness(sd->privacy_led, 0); #endif } static inline int check_which(u32 which) { if (which != V4L2_SUBDEV_FORMAT_TRY && which != V4L2_SUBDEV_FORMAT_ACTIVE) return -EINVAL; return 0; } static inline int check_pad(struct v4l2_subdev *sd, u32 pad) { #if defined(CONFIG_MEDIA_CONTROLLER) if (sd->entity.num_pads) { if (pad >= sd->entity.num_pads) return -EINVAL; return 0; } #endif /* allow pad 0 on subdevices not registered as media entities */ if (pad > 0) return -EINVAL; return 0; } static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, u32 which, u32 pad, u32 stream) { if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) if (!v4l2_subdev_state_get_format(state, pad, stream)) return -EINVAL; return 0; #else return -EINVAL; #endif } if (stream != 0) return -EINVAL; if (which == V4L2_SUBDEV_FORMAT_TRY && (!state 
|| !state->pads)) return -EINVAL; return 0; } static inline int check_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { if (!format) return -EINVAL; return check_which(format->which) ? : check_pad(sd, format->pad) ? : check_state(sd, state, format->which, format->pad, format->stream); } static int call_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { return check_format(sd, state, format) ? : sd->ops->pad->get_fmt(sd, state, format); } static int call_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { return check_format(sd, state, format) ? : sd->ops->pad->set_fmt(sd, state, format); } static int call_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code) { if (!code) return -EINVAL; return check_which(code->which) ? : check_pad(sd, code->pad) ? : check_state(sd, state, code->which, code->pad, code->stream) ? : sd->ops->pad->enum_mbus_code(sd, state, code); } static int call_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_frame_size_enum *fse) { if (!fse) return -EINVAL; return check_which(fse->which) ? : check_pad(sd, fse->pad) ? : check_state(sd, state, fse->which, fse->pad, fse->stream) ? : sd->ops->pad->enum_frame_size(sd, state, fse); } static int call_enum_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_frame_interval_enum *fie) { if (!fie) return -EINVAL; return check_which(fie->which) ? : check_pad(sd, fie->pad) ? : check_state(sd, state, fie->which, fie->pad, fie->stream) ? : sd->ops->pad->enum_frame_interval(sd, state, fie); } static inline int check_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel) { if (!sel) return -EINVAL; return check_which(sel->which) ? : check_pad(sd, sel->pad) ? : check_state(sd, state, sel->which, sel->pad, sel->stream); } static int call_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel) { return check_selection(sd, state, sel) ? : sd->ops->pad->get_selection(sd, state, sel); } static int call_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel) { return check_selection(sd, state, sel) ? : sd->ops->pad->set_selection(sd, state, sel); } static inline int check_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_frame_interval *fi) { if (!fi) return -EINVAL; return check_which(fi->which) ? : check_pad(sd, fi->pad) ? : check_state(sd, state, fi->which, fi->pad, fi->stream); } static int call_get_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_frame_interval *fi) { return check_frame_interval(sd, state, fi) ? : sd->ops->pad->get_frame_interval(sd, state, fi); } static int call_set_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_frame_interval *fi) { return check_frame_interval(sd, state, fi) ? 
: sd->ops->pad->set_frame_interval(sd, state, fi); } static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_mbus_frame_desc *fd) { unsigned int i; int ret; #if defined(CONFIG_MEDIA_CONTROLLER) if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) return -EOPNOTSUPP; #endif memset(fd, 0, sizeof(*fd)); ret = sd->ops->pad->get_frame_desc(sd, pad, fd); if (ret) return ret; dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad, fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" : fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" : "unknown"); for (i = 0; i < fd->num_entries; i++) { struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i]; char buf[20] = ""; if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) WARN_ON(snprintf(buf, sizeof(buf), ", vc %u, dt 0x%02x", entry->bus.csi2.vc, entry->bus.csi2.dt) >= sizeof(buf)); dev_dbg(sd->dev, "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n", entry->stream, entry->pixelcode, entry->length, entry->flags, buf); } return 0; } static inline int check_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) { if (!edid) return -EINVAL; if (edid->blocks && edid->edid == NULL) return -EINVAL; return check_pad(sd, edid->pad); } static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) { return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid); } static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) { return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid); } static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_dv_timings *timings) { if (!timings) return -EINVAL; return check_pad(sd, pad) ? : sd->ops->pad->s_dv_timings(sd, pad, timings); } static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_dv_timings *timings) { if (!timings) return -EINVAL; return check_pad(sd, pad) ? : sd->ops->pad->g_dv_timings(sd, pad, timings); } static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_dv_timings *timings) { if (!timings) return -EINVAL; return check_pad(sd, pad) ? : sd->ops->pad->query_dv_timings(sd, pad, timings); } static int call_dv_timings_cap(struct v4l2_subdev *sd, struct v4l2_dv_timings_cap *cap) { if (!cap) return -EINVAL; return check_pad(sd, cap->pad) ? : sd->ops->pad->dv_timings_cap(sd, cap); } static int call_enum_dv_timings(struct v4l2_subdev *sd, struct v4l2_enum_dv_timings *dvt) { if (!dvt) return -EINVAL; return check_pad(sd, dvt->pad) ? : sd->ops->pad->enum_dv_timings(sd, dvt); } static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_mbus_config *config) { memset(config, 0, sizeof(*config)); return check_pad(sd, pad) ? : sd->ops->pad->get_mbus_config(sd, pad, config); } static int call_s_stream(struct v4l2_subdev *sd, int enable) { int ret; /* * The .s_stream() operation must never be called to start or stop an * already started or stopped subdev. Catch offenders but don't return * an error yet to avoid regressions. */ if (WARN_ON(sd->s_stream_enabled == !!enable)) return 0; ret = sd->ops->video->s_stream(sd, enable); if (!enable && ret < 0) { dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret); ret = 0; } if (!ret) { sd->s_stream_enabled = enable; if (enable) v4l2_subdev_enable_privacy_led(sd); else v4l2_subdev_disable_privacy_led(sd); } return ret; } #ifdef CONFIG_MEDIA_CONTROLLER /* * Create state-management wrapper for pad ops dealing with subdev state. 
The * wrapper handles the case where the caller does not provide the called * subdev's state. This should be removed when all the callers are fixed. */ #define DEFINE_STATE_WRAPPER(f, arg_type) \ static int call_##f##_state(struct v4l2_subdev *sd, \ struct v4l2_subdev_state *_state, \ arg_type *arg) \ { \ struct v4l2_subdev_state *state = _state; \ int ret; \ if (!_state) \ state = v4l2_subdev_lock_and_get_active_state(sd); \ ret = call_##f(sd, state, arg); \ if (!_state && state) \ v4l2_subdev_unlock_state(state); \ return ret; \ } #else /* CONFIG_MEDIA_CONTROLLER */ #define DEFINE_STATE_WRAPPER(f, arg_type) \ static int call_##f##_state(struct v4l2_subdev *sd, \ struct v4l2_subdev_state *state, \ arg_type *arg) \ { \ return call_##f(sd, state, arg); \ } #endif /* CONFIG_MEDIA_CONTROLLER */ DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format); DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format); DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum); DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum); DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum); DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection); DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection); static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = { .get_fmt = call_get_fmt_state, .set_fmt = call_set_fmt_state, .enum_mbus_code = call_enum_mbus_code_state, .enum_frame_size = call_enum_frame_size_state, .enum_frame_interval = call_enum_frame_interval_state, .get_selection = call_get_selection_state, .set_selection = call_set_selection_state, .get_frame_interval = call_get_frame_interval, .set_frame_interval = call_set_frame_interval, .get_edid = call_get_edid, .set_edid = call_set_edid, .s_dv_timings = call_s_dv_timings, .g_dv_timings = call_g_dv_timings, .query_dv_timings = call_query_dv_timings, .dv_timings_cap = call_dv_timings_cap, .enum_dv_timings = call_enum_dv_timings, .get_frame_desc = call_get_frame_desc, .get_mbus_config = call_get_mbus_config, }; static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = { .s_stream = call_s_stream, }; const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = { .pad = &v4l2_subdev_call_pad_wrappers, .video = &v4l2_subdev_call_video_wrappers, }; EXPORT_SYMBOL(v4l2_subdev_call_wrappers); #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) static struct v4l2_subdev_state * subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh, unsigned int cmd, void *arg) { u32 which; switch (cmd) { default: return NULL; case VIDIOC_SUBDEV_G_FMT: case VIDIOC_SUBDEV_S_FMT: which = ((struct v4l2_subdev_format *)arg)->which; break; case VIDIOC_SUBDEV_G_CROP: case VIDIOC_SUBDEV_S_CROP: which = ((struct v4l2_subdev_crop *)arg)->which; break; case VIDIOC_SUBDEV_ENUM_MBUS_CODE: which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which; break; case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: which = ((struct v4l2_subdev_frame_size_enum *)arg)->which; break; case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which; break; case VIDIOC_SUBDEV_G_SELECTION: case VIDIOC_SUBDEV_S_SELECTION: which = ((struct v4l2_subdev_selection *)arg)->which; break; case VIDIOC_SUBDEV_G_FRAME_INTERVAL: case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { struct v4l2_subdev_frame_interval *fi = arg; if (!(subdev_fh->client_caps & V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH)) fi->which = V4L2_SUBDEV_FORMAT_ACTIVE; which = fi->which; break; } case 
VIDIOC_SUBDEV_G_ROUTING: case VIDIOC_SUBDEV_S_ROUTING: which = ((struct v4l2_subdev_routing *)arg)->which; break; } return which == V4L2_SUBDEV_FORMAT_TRY ? subdev_fh->state : v4l2_subdev_get_unlocked_active_state(sd); } static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg, struct v4l2_subdev_state *state) { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_fh *vfh = file_to_v4l2_fh(file); struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS; bool client_supports_streams = subdev_fh->client_caps & V4L2_SUBDEV_CLIENT_CAP_STREAMS; int rval; /* * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS. * Remove this when the API is no longer experimental. */ if (!v4l2_subdev_enable_streams_api) streams_subdev = false; switch (cmd) { case VIDIOC_SUBDEV_QUERYCAP: { struct v4l2_subdev_capability *cap = arg; memset(cap->reserved, 0, sizeof(cap->reserved)); cap->version = LINUX_VERSION_CODE; cap->capabilities = (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) | (streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0); return 0; } case VIDIOC_QUERYCTRL: /* * TODO: this really should be folded into v4l2_queryctrl (this * currently returns -EINVAL for NULL control handlers). * However, v4l2_queryctrl() is still called directly by * drivers as well and until that has been addressed I believe * it is safer to do the check here. The same is true for the * other control ioctls below. */ if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_queryctrl(vfh->ctrl_handler, arg); case VIDIOC_QUERY_EXT_CTRL: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg); case VIDIOC_QUERYMENU: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_querymenu(vfh->ctrl_handler, arg); case VIDIOC_G_CTRL: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_g_ctrl(vfh->ctrl_handler, arg); case VIDIOC_S_CTRL: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg); case VIDIOC_G_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_g_ext_ctrls(vfh->ctrl_handler, vdev, sd->v4l2_dev->mdev, arg); case VIDIOC_S_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vdev, sd->v4l2_dev->mdev, arg); case VIDIOC_TRY_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; return v4l2_try_ext_ctrls(vfh->ctrl_handler, vdev, sd->v4l2_dev->mdev, arg); case VIDIOC_DQEVENT: if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) return -ENOIOCTLCMD; return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK); case VIDIOC_SUBSCRIBE_EVENT: if (v4l2_subdev_has_op(sd, core, subscribe_event)) return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg); if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) && vfh->ctrl_handler) return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg); return -ENOIOCTLCMD; case VIDIOC_UNSUBSCRIBE_EVENT: if (v4l2_subdev_has_op(sd, core, unsubscribe_event)) return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg); if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) return v4l2_event_subdev_unsubscribe(sd, vfh, arg); return -ENOIOCTLCMD; #ifdef CONFIG_VIDEO_ADV_DEBUG case VIDIOC_DBG_G_REGISTER: { struct v4l2_dbg_register *p = arg; if (!capable(CAP_SYS_ADMIN)) return -EPERM; return v4l2_subdev_call(sd, core, g_register, p); } case VIDIOC_DBG_S_REGISTER: { struct v4l2_dbg_register *p = arg; if (!capable(CAP_SYS_ADMIN)) 
return -EPERM; return v4l2_subdev_call(sd, core, s_register, p); } case VIDIOC_DBG_G_CHIP_INFO: { struct v4l2_dbg_chip_info *p = arg; if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr) return -EINVAL; if (sd->ops->core && sd->ops->core->s_register) p->flags |= V4L2_CHIP_FL_WRITABLE; if (sd->ops->core && sd->ops->core->g_register) p->flags |= V4L2_CHIP_FL_READABLE; strscpy(p->name, sd->name, sizeof(p->name)); return 0; } #endif case VIDIOC_LOG_STATUS: { int ret; pr_info("%s: ================= START STATUS =================\n", sd->name); ret = v4l2_subdev_call(sd, core, log_status); pr_info("%s: ================== END STATUS ==================\n", sd->name); return ret; } case VIDIOC_SUBDEV_G_FMT: { struct v4l2_subdev_format *format = arg; if (!client_supports_streams) format->stream = 0; memset(format->reserved, 0, sizeof(format->reserved)); memset(format->format.reserved, 0, sizeof(format->format.reserved)); return v4l2_subdev_call(sd, pad, get_fmt, state, format); } case VIDIOC_SUBDEV_S_FMT: { struct v4l2_subdev_format *format = arg; if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) return -EPERM; if (!client_supports_streams) format->stream = 0; memset(format->reserved, 0, sizeof(format->reserved)); memset(format->format.reserved, 0, sizeof(format->format.reserved)); return v4l2_subdev_call(sd, pad, set_fmt, state, format); } case VIDIOC_SUBDEV_G_CROP: { struct v4l2_subdev_crop *crop = arg; struct v4l2_subdev_selection sel; if (!client_supports_streams) crop->stream = 0; memset(crop->reserved, 0, sizeof(crop->reserved)); memset(&sel, 0, sizeof(sel)); sel.which = crop->which; sel.pad = crop->pad; sel.stream = crop->stream; sel.target = V4L2_SEL_TGT_CROP; rval = v4l2_subdev_call( sd, pad, get_selection, state, &sel); crop->rect = sel.r; return rval; } case VIDIOC_SUBDEV_S_CROP: { struct v4l2_subdev_crop *crop = arg; struct v4l2_subdev_selection sel; if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) return -EPERM; if (!client_supports_streams) crop->stream = 0; memset(crop->reserved, 0, sizeof(crop->reserved)); memset(&sel, 0, sizeof(sel)); sel.which = crop->which; sel.pad = crop->pad; sel.stream = crop->stream; sel.target = V4L2_SEL_TGT_CROP; sel.r = crop->rect; rval = v4l2_subdev_call( sd, pad, set_selection, state, &sel); crop->rect = sel.r; return rval; } case VIDIOC_SUBDEV_ENUM_MBUS_CODE: { struct v4l2_subdev_mbus_code_enum *code = arg; if (!client_supports_streams) code->stream = 0; memset(code->reserved, 0, sizeof(code->reserved)); return v4l2_subdev_call(sd, pad, enum_mbus_code, state, code); } case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: { struct v4l2_subdev_frame_size_enum *fse = arg; if (!client_supports_streams) fse->stream = 0; memset(fse->reserved, 0, sizeof(fse->reserved)); return v4l2_subdev_call(sd, pad, enum_frame_size, state, fse); } case VIDIOC_SUBDEV_G_FRAME_INTERVAL: { struct v4l2_subdev_frame_interval *fi = arg; if (!client_supports_streams) fi->stream = 0; memset(fi->reserved, 0, sizeof(fi->reserved)); return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi); } case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { struct v4l2_subdev_frame_interval *fi = arg; if (!client_supports_streams) fi->stream = 0; if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) return -EPERM; memset(fi->reserved, 0, sizeof(fi->reserved)); return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi); } case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: { struct v4l2_subdev_frame_interval_enum *fie = arg; if (!client_supports_streams) fie->stream = 0; memset(fie->reserved, 0, 
sizeof(fie->reserved)); return v4l2_subdev_call(sd, pad, enum_frame_interval, state, fie); } case VIDIOC_SUBDEV_G_SELECTION: { struct v4l2_subdev_selection *sel = arg; if (!client_supports_streams) sel->stream = 0; memset(sel->reserved, 0, sizeof(sel->reserved)); return v4l2_subdev_call( sd, pad, get_selection, state, sel); } case VIDIOC_SUBDEV_S_SELECTION: { struct v4l2_subdev_selection *sel = arg; if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) return -EPERM; if (!client_supports_streams) sel->stream = 0; memset(sel->reserved, 0, sizeof(sel->reserved)); return v4l2_subdev_call( sd, pad, set_selection, state, sel); } case VIDIOC_G_EDID: { struct v4l2_subdev_edid *edid = arg; return v4l2_subdev_call(sd, pad, get_edid, edid); } case VIDIOC_S_EDID: { struct v4l2_subdev_edid *edid = arg; return v4l2_subdev_call(sd, pad, set_edid, edid); } case VIDIOC_SUBDEV_DV_TIMINGS_CAP: { struct v4l2_dv_timings_cap *cap = arg; return v4l2_subdev_call(sd, pad, dv_timings_cap, cap); } case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: { struct v4l2_enum_dv_timings *dvt = arg; return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt); } case VIDIOC_SUBDEV_QUERY_DV_TIMINGS: return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg); case VIDIOC_SUBDEV_G_DV_TIMINGS: return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg); case VIDIOC_SUBDEV_S_DV_TIMINGS: if (ro_subdev) return -EPERM; return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg); case VIDIOC_SUBDEV_G_STD: return v4l2_subdev_call(sd, video, g_std, arg); case VIDIOC_SUBDEV_S_STD: { v4l2_std_id *std = arg; if (ro_subdev) return -EPERM; return v4l2_subdev_call(sd, video, s_std, *std); } case VIDIOC_SUBDEV_ENUMSTD: { struct v4l2_standard *p = arg; v4l2_std_id id; if (v4l2_subdev_call(sd, video, g_tvnorms, &id)) return -EINVAL; return v4l_video_std_enumstd(p, id); } case VIDIOC_SUBDEV_QUERYSTD: return v4l2_subdev_call(sd, video, querystd, arg); case VIDIOC_SUBDEV_G_ROUTING: { struct v4l2_subdev_routing *routing = arg; struct v4l2_subdev_krouting *krouting; if (!v4l2_subdev_enable_streams_api) return -ENOIOCTLCMD; if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) return -ENOIOCTLCMD; memset(routing->reserved, 0, sizeof(routing->reserved)); krouting = &state->routing; memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, krouting->routes, min(krouting->num_routes, routing->len_routes) * sizeof(*krouting->routes)); routing->num_routes = krouting->num_routes; return 0; } case VIDIOC_SUBDEV_S_ROUTING: { struct v4l2_subdev_routing *routing = arg; struct v4l2_subdev_route *routes = (struct v4l2_subdev_route *)(uintptr_t)routing->routes; struct v4l2_subdev_krouting krouting = {}; unsigned int num_active_routes = 0; unsigned int i; if (!v4l2_subdev_enable_streams_api) return -ENOIOCTLCMD; if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) return -ENOIOCTLCMD; if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) return -EPERM; if (routing->num_routes > routing->len_routes) return -EINVAL; memset(routing->reserved, 0, sizeof(routing->reserved)); for (i = 0; i < routing->num_routes; ++i) { const struct v4l2_subdev_route *route = &routes[i]; const struct media_pad *pads = sd->entity.pads; if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID || route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID) return -EINVAL; if (route->sink_pad >= sd->entity.num_pads) return -EINVAL; if (!(pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) return -EINVAL; if (route->source_pad >= sd->entity.num_pads) return -EINVAL; if (!(pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) return -EINVAL; if 
(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) num_active_routes++; } /* * Drivers that implement routing need to report a frame * descriptor accordingly, with up to one entry per route. Until * the frame descriptors entries get allocated dynamically, * limit the number of active routes to * V4L2_FRAME_DESC_ENTRY_MAX. */ if (num_active_routes > V4L2_FRAME_DESC_ENTRY_MAX) return -E2BIG; /* * If the driver doesn't support setting routing, just return * the routing table. */ if (!v4l2_subdev_has_op(sd, pad, set_routing)) { memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, state->routing.routes, min(state->routing.num_routes, routing->len_routes) * sizeof(*state->routing.routes)); routing->num_routes = state->routing.num_routes; return 0; } krouting.num_routes = routing->num_routes; krouting.len_routes = routing->len_routes; krouting.routes = routes; rval = v4l2_subdev_call(sd, pad, set_routing, state, routing->which, &krouting); if (rval < 0) return rval; memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, state->routing.routes, min(state->routing.num_routes, routing->len_routes) * sizeof(*state->routing.routes)); routing->num_routes = state->routing.num_routes; return 0; } case VIDIOC_SUBDEV_G_CLIENT_CAP: { struct v4l2_subdev_client_capability *client_cap = arg; client_cap->capabilities = subdev_fh->client_caps; return 0; } case VIDIOC_SUBDEV_S_CLIENT_CAP: { struct v4l2_subdev_client_capability *client_cap = arg; /* * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not * enabled. Remove this when streams API is no longer * experimental. */ if (!v4l2_subdev_enable_streams_api) client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS; /* Filter out unsupported capabilities */ client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS | V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH); subdev_fh->client_caps = client_cap->capabilities; return 0; } default: return v4l2_subdev_call(sd, core, ioctl, cmd, arg); } return 0; } static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg) { struct video_device *vdev = video_devdata(file); struct mutex *lock = vdev->lock; long ret = -ENODEV; if (lock && mutex_lock_interruptible(lock)) return -ERESTARTSYS; if (video_is_registered(vdev)) { struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_fh *vfh = file_to_v4l2_fh(file); struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); struct v4l2_subdev_state *state; state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg); if (state) v4l2_subdev_lock_state(state); ret = subdev_do_ioctl(file, cmd, arg, state); if (state) v4l2_subdev_unlock_state(state); } if (lock) mutex_unlock(lock); return ret; } static long subdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock); } #ifdef CONFIG_COMPAT static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg); } #endif #else /* CONFIG_VIDEO_V4L2_SUBDEV_API */ static long subdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return -ENODEV; } #ifdef CONFIG_COMPAT static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) { return -ENODEV; } #endif #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ static __poll_t subdev_poll(struct file *file, poll_table *wait) { struct video_device *vdev = video_devdata(file); 
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); struct v4l2_fh *fh = file_to_v4l2_fh(file); if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) return EPOLLERR; poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) return EPOLLPRI; return 0; } const struct v4l2_file_operations v4l2_subdev_fops = { .owner = THIS_MODULE, .open = subdev_open, .unlocked_ioctl = subdev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl32 = subdev_compat_ioctl32, #endif .release = subdev_close, .poll = subdev_poll, }; #ifdef CONFIG_MEDIA_CONTROLLER int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity, struct fwnode_endpoint *endpoint) { struct fwnode_handle *fwnode; struct v4l2_subdev *sd; if (!is_media_entity_v4l2_subdev(entity)) return -EINVAL; sd = media_entity_to_v4l2_subdev(entity); fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode); fwnode_handle_put(fwnode); if (device_match_fwnode(sd->dev, fwnode)) return endpoint->port; return -ENXIO; } EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1); int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { bool pass = true; /* The width, height and code must match. */ if (source_fmt->format.width != sink_fmt->format.width) { dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: width does not match (source %u, sink %u)\n", __func__, source_fmt->format.width, sink_fmt->format.width); pass = false; } if (source_fmt->format.height != sink_fmt->format.height) { dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: height does not match (source %u, sink %u)\n", __func__, source_fmt->format.height, sink_fmt->format.height); pass = false; } if (source_fmt->format.code != sink_fmt->format.code) { dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n", __func__, source_fmt->format.code, sink_fmt->format.code); pass = false; } /* The field order must match, or the sink field order must be NONE * to support interlaced hardware connected to bridges that support * progressive formats only. 
*/ if (source_fmt->format.field != sink_fmt->format.field && sink_fmt->format.field != V4L2_FIELD_NONE) { dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: field does not match (source %u, sink %u)\n", __func__, source_fmt->format.field, sink_fmt->format.field); pass = false; } if (pass) return 0; dev_dbg(sd->entity.graph_obj.mdev->dev, "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__, link->source->entity->name, link->source->index, link->sink->entity->name, link->sink->index); return -EPIPE; } EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default); static int v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream, struct v4l2_subdev_format *fmt, bool states_locked) { struct v4l2_subdev_state *state; struct v4l2_subdev *sd; int ret; sd = media_entity_to_v4l2_subdev(pad->entity); fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE; fmt->pad = pad->index; fmt->stream = stream; if (states_locked) state = v4l2_subdev_get_locked_active_state(sd); else state = v4l2_subdev_lock_and_get_active_state(sd); ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt); if (!states_locked && state) v4l2_subdev_unlock_state(state); return ret; } #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) static void __v4l2_link_validate_get_streams(struct media_pad *pad, u64 *streams_mask, bool states_locked) { struct v4l2_subdev_route *route; struct v4l2_subdev_state *state; struct v4l2_subdev *subdev; subdev = media_entity_to_v4l2_subdev(pad->entity); *streams_mask = 0; if (states_locked) state = v4l2_subdev_get_locked_active_state(subdev); else state = v4l2_subdev_lock_and_get_active_state(subdev); if (WARN_ON(!state)) return; for_each_active_route(&state->routing, route) { u32 route_pad; u32 route_stream; if (pad->flags & MEDIA_PAD_FL_SOURCE) { route_pad = route->source_pad; route_stream = route->source_stream; } else { route_pad = route->sink_pad; route_stream = route->sink_stream; } if (route_pad != pad->index) continue; *streams_mask |= BIT_ULL(route_stream); } if (!states_locked) v4l2_subdev_unlock_state(state); } #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ static void v4l2_link_validate_get_streams(struct media_pad *pad, u64 *streams_mask, bool states_locked) { struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity); if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) { /* Non-streams subdevs have an implicit stream 0 */ *streams_mask = BIT_ULL(0); return; } #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) __v4l2_link_validate_get_streams(pad, streams_mask, states_locked); #else /* This shouldn't happen */ *streams_mask = 0; #endif } static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked) { struct v4l2_subdev *sink_subdev = media_entity_to_v4l2_subdev(link->sink->entity); struct device *dev = sink_subdev->entity.graph_obj.mdev->dev; u64 source_streams_mask; u64 sink_streams_mask; u64 dangling_sink_streams; u32 stream; int ret; dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n", link->source->entity->name, link->source->index, link->sink->entity->name, link->sink->index); v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked); v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked); /* * It is ok to have more source streams than sink streams as extra * source streams can just be ignored by the receiver, but having extra * sink streams is an error as streams must have a source. 
*/ dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) & sink_streams_mask; if (dangling_sink_streams) { dev_err(dev, "Dangling sink streams: mask %#llx\n", dangling_sink_streams); return -EINVAL; } /* Validate source and sink stream formats */ for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) { struct v4l2_subdev_format sink_fmt, source_fmt; if (!(sink_streams_mask & BIT_ULL(stream))) continue; dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n", link->source->entity->name, link->source->index, stream, link->sink->entity->name, link->sink->index, stream); ret = v4l2_subdev_link_validate_get_format(link->source, stream, &source_fmt, states_locked); if (ret < 0) { dev_dbg(dev, "Failed to get format for \"%s\":%u:%u (but that's ok)\n", link->source->entity->name, link->source->index, stream); continue; } ret = v4l2_subdev_link_validate_get_format(link->sink, stream, &sink_fmt, states_locked); if (ret < 0) { dev_dbg(dev, "Failed to get format for \"%s\":%u:%u (but that's ok)\n", link->sink->entity->name, link->sink->index, stream); continue; } /* TODO: add stream number to link_validate() */ ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link, &source_fmt, &sink_fmt); if (!ret) continue; if (ret != -ENOIOCTLCMD) return ret; ret = v4l2_subdev_link_validate_default(sink_subdev, link, &source_fmt, &sink_fmt); if (ret) return ret; } return 0; } int v4l2_subdev_link_validate(struct media_link *link) { struct v4l2_subdev *source_sd, *sink_sd; struct v4l2_subdev_state *source_state, *sink_state; bool states_locked; int ret; /* * Links are validated in the context of the sink entity. Usage of this * helper on a sink that is not a subdev is a clear driver bug. */ if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity))) return -EINVAL; /* * If the source is a video device, delegate link validation to it. This * allows usage of this helper for a subdev connected to a video output * device, provided that the driver implements the video output device's * .link_validate() operation. */ if (is_media_entity_v4l2_video_device(link->source->entity)) { struct media_entity *source = link->source->entity; if (!source->ops || !source->ops->link_validate) { /* * Many existing drivers do not implement the required * .link_validate() operation for their video devices. * Print a warning to get the drivers fixed, and return * 0 to avoid breaking userspace. This should * eventually be turned into a WARN_ON() once all * drivers have been fixed. */ pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n", source->name); return 0; } /* * Avoid infinite loops in case a video device incorrectly uses * this helper function as its .link_validate() handler. */ if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate)) return -EINVAL; return source->ops->link_validate(link); } /* * If the source is still not a subdev, usage of this helper is a clear * driver bug. 
*/ if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity))) return -EINVAL; sink_sd = media_entity_to_v4l2_subdev(link->sink->entity); source_sd = media_entity_to_v4l2_subdev(link->source->entity); sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd); source_state = v4l2_subdev_get_unlocked_active_state(source_sd); states_locked = sink_state && source_state; if (states_locked) v4l2_subdev_lock_states(sink_state, source_state); ret = v4l2_subdev_link_validate_locked(link, states_locked); if (states_locked) v4l2_subdev_unlock_states(sink_state, source_state); return ret; } EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate); bool v4l2_subdev_has_pad_interdep(struct media_entity *entity, unsigned int pad0, unsigned int pad1) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct v4l2_subdev_krouting *routing; struct v4l2_subdev_state *state; unsigned int i; state = v4l2_subdev_lock_and_get_active_state(sd); routing = &state->routing; for (i = 0; i < routing->num_routes; ++i) { struct v4l2_subdev_route *route = &routing->routes[i]; if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) continue; if ((route->sink_pad == pad0 && route->source_pad == pad1) || (route->source_pad == pad0 && route->sink_pad == pad1)) { v4l2_subdev_unlock_state(state); return true; } } v4l2_subdev_unlock_state(state); return false; } EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep); struct v4l2_subdev_state * __v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name, struct lock_class_key *lock_key) { struct v4l2_subdev_state *state; int ret; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return ERR_PTR(-ENOMEM); __mutex_init(&state->_lock, lock_name, lock_key); if (sd->state_lock) state->lock = sd->state_lock; else state->lock = &state->_lock; state->sd = sd; /* Drivers that support streams do not need the legacy pad config */ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) { state->pads = kvcalloc(sd->entity.num_pads, sizeof(*state->pads), GFP_KERNEL); if (!state->pads) { ret = -ENOMEM; goto err; } } if (sd->internal_ops && sd->internal_ops->init_state) { /* * There can be no race at this point, but we lock the state * anyway to satisfy lockdep checks. 
*/ v4l2_subdev_lock_state(state); ret = sd->internal_ops->init_state(sd, state); v4l2_subdev_unlock_state(state); if (ret) goto err; } return state; err: if (state && state->pads) kvfree(state->pads); kfree(state); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc); void __v4l2_subdev_state_free(struct v4l2_subdev_state *state) { if (!state) return; mutex_destroy(&state->_lock); kfree(state->routing.routes); kvfree(state->stream_configs.configs); kvfree(state->pads); kfree(state); } EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free); int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, struct lock_class_key *key) { struct v4l2_subdev_state *state; struct device *dev = sd->dev; bool has_disable_streams; bool has_enable_streams; bool has_s_stream; /* Check that the subdevice implements the required features */ has_s_stream = v4l2_subdev_has_op(sd, video, s_stream); has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams); has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams); if (has_enable_streams != has_disable_streams) { dev_err(dev, "subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n", sd->name); return -EINVAL; } if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { if (has_s_stream && !has_enable_streams) { dev_err(dev, "subdev '%s' must implement .enable/disable_streams()\n", sd->name); return -EINVAL; } } if (sd->ctrl_handler) sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS; state = __v4l2_subdev_state_alloc(sd, name, key); if (IS_ERR(state)) return PTR_ERR(state); sd->active_state = state; return 0; } EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize); void v4l2_subdev_cleanup(struct v4l2_subdev *sd) { struct v4l2_async_subdev_endpoint *ase, *ase_tmp; __v4l2_subdev_state_free(sd->active_state); sd->active_state = NULL; /* Uninitialised sub-device, bail out here. 
*/ if (!sd->async_subdev_endpoint_list.next) return; list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list, async_subdev_endpoint_entry) { list_del(&ase->async_subdev_endpoint_entry); kfree(ase); } } EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup); struct v4l2_mbus_framefmt * __v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, unsigned int pad, u32 stream) { struct v4l2_subdev_stream_configs *stream_configs; unsigned int i; if (WARN_ON_ONCE(!state)) return NULL; if (state->pads) { if (stream) return NULL; if (pad >= state->sd->entity.num_pads) return NULL; return &state->pads[pad].format; } lockdep_assert_held(state->lock); stream_configs = &state->stream_configs; for (i = 0; i < stream_configs->num_configs; ++i) { if (stream_configs->configs[i].pad == pad && stream_configs->configs[i].stream == stream) return &stream_configs->configs[i].fmt; } return NULL; } EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format); struct v4l2_rect * __v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, u32 stream) { struct v4l2_subdev_stream_configs *stream_configs; unsigned int i; if (WARN_ON_ONCE(!state)) return NULL; if (state->pads) { if (stream) return NULL; if (pad >= state->sd->entity.num_pads) return NULL; return &state->pads[pad].crop; } lockdep_assert_held(state->lock); stream_configs = &state->stream_configs; for (i = 0; i < stream_configs->num_configs; ++i) { if (stream_configs->configs[i].pad == pad && stream_configs->configs[i].stream == stream) return &stream_configs->configs[i].crop; } return NULL; } EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop); struct v4l2_rect * __v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, unsigned int pad, u32 stream) { struct v4l2_subdev_stream_configs *stream_configs; unsigned int i; if (WARN_ON_ONCE(!state)) return NULL; if (state->pads) { if (stream) return NULL; if (pad >= state->sd->entity.num_pads) return NULL; return &state->pads[pad].compose; } lockdep_assert_held(state->lock); stream_configs = &state->stream_configs; for (i = 0; i < stream_configs->num_configs; ++i) { if (stream_configs->configs[i].pad == pad && stream_configs->configs[i].stream == stream) return &stream_configs->configs[i].compose; } return NULL; } EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose); struct v4l2_fract * __v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, unsigned int pad, u32 stream) { struct v4l2_subdev_stream_configs *stream_configs; unsigned int i; if (WARN_ON(!state)) return NULL; lockdep_assert_held(state->lock); if (state->pads) { if (stream) return NULL; if (pad >= state->sd->entity.num_pads) return NULL; return &state->pads[pad].interval; } lockdep_assert_held(state->lock); stream_configs = &state->stream_configs; for (i = 0; i < stream_configs->num_configs; ++i) { if (stream_configs->configs[i].pad == pad && stream_configs->configs[i].stream == stream) return &stream_configs->configs[i].interval; } return NULL; } EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval); #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) static int v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs, const struct v4l2_subdev_krouting *routing) { struct v4l2_subdev_stream_configs new_configs = { 0 }; struct v4l2_subdev_route *route; u32 idx; /* Count number of formats needed */ for_each_active_route(routing, route) { /* * Each route needs a format on both ends of the route. 
*/ new_configs.num_configs += 2; } if (new_configs.num_configs) { new_configs.configs = kvcalloc(new_configs.num_configs, sizeof(*new_configs.configs), GFP_KERNEL); if (!new_configs.configs) return -ENOMEM; } /* * Fill in the 'pad' and stream' value for each item in the array from * the routing table */ idx = 0; for_each_active_route(routing, route) { new_configs.configs[idx].pad = route->sink_pad; new_configs.configs[idx].stream = route->sink_stream; idx++; new_configs.configs[idx].pad = route->source_pad; new_configs.configs[idx].stream = route->source_stream; idx++; } kvfree(stream_configs->configs); *stream_configs = new_configs; return 0; } int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt; fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); if (!fmt) return -EINVAL; format->format = *fmt; return 0; } EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt); int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_frame_interval *fi) { struct v4l2_fract *interval; interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream); if (!interval) return -EINVAL; fi->interval = *interval; return 0; } EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval); int v4l2_subdev_set_routing(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, const struct v4l2_subdev_krouting *routing) { struct v4l2_subdev_krouting *dst = &state->routing; const struct v4l2_subdev_krouting *src = routing; struct v4l2_subdev_krouting new_routing = { 0 }; size_t bytes; int r; if (unlikely(check_mul_overflow((size_t)src->num_routes, sizeof(*src->routes), &bytes))) return -EOVERFLOW; lockdep_assert_held(state->lock); if (src->num_routes > 0) { new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL); if (!new_routing.routes) return -ENOMEM; } new_routing.num_routes = src->num_routes; r = v4l2_subdev_init_stream_configs(&state->stream_configs, &new_routing); if (r) { kfree(new_routing.routes); return r; } kfree(dst->routes); *dst = new_routing; return 0; } EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing); struct v4l2_subdev_route * __v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing, struct v4l2_subdev_route *route) { if (route) ++route; else route = &routing->routes[0]; for (; route < routing->routes + routing->num_routes; ++route) { if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) continue; return route; } return NULL; } EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route); int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, const struct v4l2_subdev_krouting *routing, const struct v4l2_mbus_framefmt *fmt) { struct v4l2_subdev_stream_configs *stream_configs; unsigned int i; int ret; ret = v4l2_subdev_set_routing(sd, state, routing); if (ret) return ret; stream_configs = &state->stream_configs; for (i = 0; i < stream_configs->num_configs; ++i) stream_configs->configs[i].fmt = *fmt; return 0; } EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt); int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing, u32 pad, u32 stream, u32 *other_pad, u32 *other_stream) { unsigned int i; for (i = 0; i < routing->num_routes; ++i) { struct v4l2_subdev_route *route = &routing->routes[i]; if (route->source_pad == pad && route->source_stream == stream) { if (other_pad) *other_pad = route->sink_pad; if (other_stream) *other_stream = route->sink_stream; return 0; } if (route->sink_pad == pad 
&& route->sink_stream == stream) { if (other_pad) *other_pad = route->source_pad; if (other_stream) *other_stream = route->source_stream; return 0; } } return -EINVAL; } EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end); struct v4l2_mbus_framefmt * v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, u32 pad, u32 stream) { u32 other_pad, other_stream; int ret; ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, stream, &other_pad, &other_stream); if (ret) return NULL; return v4l2_subdev_state_get_format(state, other_pad, other_stream); } EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format); u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, u32 pad0, u32 pad1, u64 *streams) { const struct v4l2_subdev_krouting *routing = &state->routing; struct v4l2_subdev_route *route; u64 streams0 = 0; u64 streams1 = 0; for_each_active_route(routing, route) { if (route->sink_pad == pad0 && route->source_pad == pad1 && (*streams & BIT_ULL(route->sink_stream))) { streams0 |= BIT_ULL(route->sink_stream); streams1 |= BIT_ULL(route->source_stream); } if (route->source_pad == pad0 && route->sink_pad == pad1 && (*streams & BIT_ULL(route->source_stream))) { streams0 |= BIT_ULL(route->source_stream); streams1 |= BIT_ULL(route->sink_stream); } } *streams = streams0; return streams1; } EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams); int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, const struct v4l2_subdev_krouting *routing, enum v4l2_subdev_routing_restriction disallow) { u32 *remote_pads = NULL; unsigned int i, j; int ret = -EINVAL; if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX | V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) { remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads), GFP_KERNEL); if (!remote_pads) return -ENOMEM; for (i = 0; i < sd->entity.num_pads; ++i) remote_pads[i] = U32_MAX; } for (i = 0; i < routing->num_routes; ++i) { const struct v4l2_subdev_route *route = &routing->routes[i]; /* Validate the sink and source pad numbers. */ if (route->sink_pad >= sd->entity.num_pads || !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) { dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n", i, route->sink_pad); goto out; } if (route->source_pad >= sd->entity.num_pads || !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) { dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n", i, route->source_pad); goto out; } /* * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a * sink pad must be routed to a single source pad. */ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) { if (remote_pads[route->sink_pad] != U32_MAX && remote_pads[route->sink_pad] != route->source_pad) { dev_dbg(sd->dev, "route %u attempts to mix %s streams\n", i, "sink"); goto out; } } /* * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a * source pad must originate from a single sink pad. */ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) { if (remote_pads[route->source_pad] != U32_MAX && remote_pads[route->source_pad] != route->sink_pad) { dev_dbg(sd->dev, "route %u attempts to mix %s streams\n", i, "source"); goto out; } } /* * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink * side can not do stream multiplexing, i.e. there can be only * a single stream in a sink pad. 
*/ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) { if (remote_pads[route->sink_pad] != U32_MAX) { dev_dbg(sd->dev, "route %u attempts to multiplex on %s pad %u\n", i, "sink", route->sink_pad); goto out; } } /* * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the * source side can not do stream multiplexing, i.e. there can * be only a single stream in a source pad. */ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) { if (remote_pads[route->source_pad] != U32_MAX) { dev_dbg(sd->dev, "route %u attempts to multiplex on %s pad %u\n", i, "source", route->source_pad); goto out; } } if (remote_pads) { remote_pads[route->sink_pad] = route->source_pad; remote_pads[route->source_pad] = route->sink_pad; } for (j = i + 1; j < routing->num_routes; ++j) { const struct v4l2_subdev_route *r = &routing->routes[j]; /* * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can * originate from the same (sink) stream. */ if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) && route->sink_pad == r->sink_pad && route->sink_stream == r->sink_stream) { dev_dbg(sd->dev, "routes %u and %u originate from same sink (%u/%u)\n", i, j, route->sink_pad, route->sink_stream); goto out; } /* * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end * at the same (source) stream. */ if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) && route->source_pad == r->source_pad && route->source_stream == r->source_stream) { dev_dbg(sd->dev, "routes %u and %u end at same source (%u/%u)\n", i, j, route->source_pad, route->source_stream); goto out; } } } ret = 0; out: kfree(remote_pads); return ret; } EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate); static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, u32 pad, u64 streams_mask, u64 *found_streams, u64 *enabled_streams) { if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { *found_streams = BIT_ULL(0); *enabled_streams = (sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0; dev_dbg(sd->dev, "collect_streams: sub-device \"%s\" does not support streams\n", sd->entity.name); return; } *found_streams = 0; *enabled_streams = 0; for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { const struct v4l2_subdev_stream_config *cfg = &state->stream_configs.configs[i]; if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream))) continue; *found_streams |= BIT_ULL(cfg->stream); if (cfg->enabled) *enabled_streams |= BIT_ULL(cfg->stream); } dev_dbg(sd->dev, "collect_streams: \"%s\":%u: found %#llx enabled %#llx\n", sd->entity.name, pad, *found_streams, *enabled_streams); } static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, u32 pad, u64 streams_mask, bool enabled) { if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { if (enabled) sd->enabled_pads |= BIT_ULL(pad); else sd->enabled_pads &= ~BIT_ULL(pad); return; } for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { struct v4l2_subdev_stream_config *cfg = &state->stream_configs.configs[i]; if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream))) cfg->enabled = enabled; } } int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad, u64 streams_mask) { struct device *dev = sd->entity.graph_obj.mdev->dev; struct v4l2_subdev_state *state; bool already_streaming; u64 enabled_streams; u64 found_streams; bool use_s_stream; int ret; dev_dbg(dev, "enable streams \"%s\":%u/%#llx\n", sd->entity.name, pad, streams_mask); /* A few basic sanity checks first. 
*/ if (pad >= sd->entity.num_pads) return -EINVAL; if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) return -EOPNOTSUPP; /* * We use a 64-bit bitmask for tracking enabled pads, so only subdevices * with 64 pads or less can be supported. */ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) return -EOPNOTSUPP; if (!streams_mask) return 0; /* Fallback on .s_stream() if .enable_streams() isn't available. */ use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams); if (!use_s_stream) state = v4l2_subdev_lock_and_get_active_state(sd); else state = NULL; /* * Verify that the requested streams exist and that they are not * already enabled. */ v4l2_subdev_collect_streams(sd, state, pad, streams_mask, &found_streams, &enabled_streams); if (found_streams != streams_mask) { dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", streams_mask & ~found_streams, sd->entity.name, pad); ret = -EINVAL; goto done; } if (enabled_streams) { dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n", enabled_streams, sd->entity.name, pad); ret = -EALREADY; goto done; } already_streaming = v4l2_subdev_is_streaming(sd); if (!use_s_stream) { /* Call the .enable_streams() operation. */ ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad, streams_mask); } else { /* Start streaming when the first pad is enabled. */ if (!already_streaming) ret = v4l2_subdev_call(sd, video, s_stream, 1); else ret = 0; } if (ret) { dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad, streams_mask, ret); goto done; } /* Mark the streams as enabled. */ v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true); /* * TODO: When all the drivers have been changed to use * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(), * instead of calling .s_stream() operation directly, we can remove * the privacy LED handling from call_s_stream() and do it here * for all cases. */ if (!use_s_stream && !already_streaming) v4l2_subdev_enable_privacy_led(sd); done: if (!use_s_stream) v4l2_subdev_unlock_state(state); return ret; } EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams); int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad, u64 streams_mask) { struct device *dev = sd->entity.graph_obj.mdev->dev; struct v4l2_subdev_state *state; u64 enabled_streams; u64 found_streams; bool use_s_stream; int ret; dev_dbg(dev, "disable streams \"%s\":%u/%#llx\n", sd->entity.name, pad, streams_mask); /* A few basic sanity checks first. */ if (pad >= sd->entity.num_pads) return -EINVAL; if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) return -EOPNOTSUPP; /* * We use a 64-bit bitmask for tracking enabled pads, so only subdevices * with 64 pads or less can be supported. */ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) return -EOPNOTSUPP; if (!streams_mask) return 0; /* Fallback on .s_stream() if .disable_streams() isn't available. */ use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams); if (!use_s_stream) state = v4l2_subdev_lock_and_get_active_state(sd); else state = NULL; /* * Verify that the requested streams exist and that they are not * already disabled. 
*/ v4l2_subdev_collect_streams(sd, state, pad, streams_mask, &found_streams, &enabled_streams); if (found_streams != streams_mask) { dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", streams_mask & ~found_streams, sd->entity.name, pad); ret = -EINVAL; goto done; } if (enabled_streams != streams_mask) { dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n", streams_mask & ~enabled_streams, sd->entity.name, pad); ret = -EALREADY; goto done; } if (!use_s_stream) { /* Call the .disable_streams() operation. */ ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad, streams_mask); } else { /* Stop streaming when the last streams are disabled. */ if (!(sd->enabled_pads & ~BIT_ULL(pad))) ret = v4l2_subdev_call(sd, video, s_stream, 0); else ret = 0; } if (ret) { dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad, streams_mask, ret); goto done; } v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false); done: if (!use_s_stream) { if (!v4l2_subdev_is_streaming(sd)) v4l2_subdev_disable_privacy_led(sd); v4l2_subdev_unlock_state(state); } return ret; } EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams); int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable) { struct v4l2_subdev_state *state; struct v4l2_subdev_route *route; struct media_pad *pad; u64 source_mask = 0; int pad_index = -1; /* * Find the source pad. This helper is meant for subdevs that have a * single source pad, so failures shouldn't happen, but catch them * loudly nonetheless as they indicate a driver bug. */ media_entity_for_each_pad(&sd->entity, pad) { if (pad->flags & MEDIA_PAD_FL_SOURCE) { pad_index = pad->index; break; } } if (WARN_ON(pad_index == -1)) return -EINVAL; if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { /* * As there's a single source pad, just collect all the source * streams. */ state = v4l2_subdev_lock_and_get_active_state(sd); for_each_active_route(&state->routing, route) source_mask |= BIT_ULL(route->source_stream); v4l2_subdev_unlock_state(state); } else { /* * For non-streams subdevices, there's a single implicit stream * per pad. 
*/ source_mask = BIT_ULL(0); } if (enable) return v4l2_subdev_enable_streams(sd, pad_index, source_mask); else return v4l2_subdev_disable_streams(sd, pad_index, source_mask); } EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper); #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ #endif /* CONFIG_MEDIA_CONTROLLER */ void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops) { INIT_LIST_HEAD(&sd->list); BUG_ON(!ops); sd->ops = ops; sd->v4l2_dev = NULL; sd->flags = 0; sd->name[0] = '\0'; sd->grp_id = 0; sd->dev_priv = NULL; sd->host_priv = NULL; sd->privacy_led = NULL; INIT_LIST_HEAD(&sd->async_subdev_endpoint_list); #if defined(CONFIG_MEDIA_CONTROLLER) sd->entity.name = sd->name; sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV; sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; #endif } EXPORT_SYMBOL(v4l2_subdev_init); void v4l2_subdev_notify_event(struct v4l2_subdev *sd, const struct v4l2_event *ev) { v4l2_event_queue(sd->devnode, ev); v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev); } EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event); bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd) { struct v4l2_subdev_state *state; if (!v4l2_subdev_has_op(sd, pad, enable_streams)) return sd->s_stream_enabled; if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) return !!sd->enabled_pads; state = v4l2_subdev_get_locked_active_state(sd); for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { const struct v4l2_subdev_stream_config *cfg; cfg = &state->stream_configs.configs[i]; if (cfg->enabled) return true; } return false; } EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming); int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd) { #if IS_REACHABLE(CONFIG_LEDS_CLASS) sd->privacy_led = led_get(sd->dev, "privacy-led"); if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT) return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led), "getting privacy LED\n"); if (!IS_ERR_OR_NULL(sd->privacy_led)) { mutex_lock(&sd->privacy_led->led_access); led_sysfs_disable(sd->privacy_led); led_trigger_remove(sd->privacy_led); led_set_brightness(sd->privacy_led, 0); mutex_unlock(&sd->privacy_led->led_access); } #endif return 0; } EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led); void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd) { #if IS_REACHABLE(CONFIG_LEDS_CLASS) if (!IS_ERR_OR_NULL(sd->privacy_led)) { mutex_lock(&sd->privacy_led->led_access); led_sysfs_enable(sd->privacy_led); mutex_unlock(&sd->privacy_led->led_access); led_put(sd->privacy_led); } #endif } EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
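The enable/disable helpers above centralise the stream bookkeeping (mask validation, the .s_stream() fallback, privacy-LED handling), so a driver is left with little beyond the actual hardware start and stop. A minimal sketch of how a driver plugs into them, assuming a hypothetical single-source-pad sensor; the mysensor_* names and the mysensor_hw_start()/mysensor_hw_stop() helpers are illustrative and not part of the code above:

static int mysensor_enable_streams(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   u32 pad, u64 streams_mask)
{
	/* v4l2_subdev_enable_streams() has already rejected unknown and
	 * already-enabled streams, so only the hardware start remains. */
	return mysensor_hw_start(sd);
}

static int mysensor_disable_streams(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    u32 pad, u64 streams_mask)
{
	return mysensor_hw_stop(sd);
}

static const struct v4l2_subdev_pad_ops mysensor_pad_ops = {
	.enable_streams = mysensor_enable_streams,
	.disable_streams = mysensor_disable_streams,
};

static const struct v4l2_subdev_video_ops mysensor_video_ops = {
	/* Legacy .s_stream() callers are funnelled through the helper,
	 * which turns them into enable/disable_streams calls. */
	.s_stream = v4l2_subdev_s_stream_helper,
};

With this wiring, both the modern per-stream path and legacy .s_stream() callers reach the same two hardware callbacks, and v4l2_subdev_is_streaming() works from the helpers' enabled-pads bookkeeping.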
// SPDX-License-Identifier: GPL-2.0 /* * f2fs shrinker support * the basic infra was copied from fs/ubifs/shrinker.c * * Copyright (c) 2015 Motorola Mobility * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org> */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include "f2fs.h" #include "node.h" static LIST_HEAD(f2fs_list); static DEFINE_SPINLOCK(f2fs_list_lock); static unsigned int shrinker_run_no; static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) { return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT]; } static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) { long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS; return count > 0 ? count : 0; } static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi, enum extent_type type) { struct extent_tree_info *eti = &sbi->extent_tree[type]; return atomic_read(&eti->total_zombie_tree) + atomic_read(&eti->total_ext_node); } unsigned long f2fs_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct f2fs_sb_info *sbi; struct list_head *p; unsigned long count = 0; spin_lock(&f2fs_list_lock); p = f2fs_list.next; while (p != &f2fs_list) { sbi = list_entry(p, struct f2fs_sb_info, s_list); /* stop f2fs_put_super */ if (!mutex_trylock(&sbi->umount_mutex)) { p = p->next; continue; } spin_unlock(&f2fs_list_lock); /* count read extent cache entries */ count += __count_extent_cache(sbi, EX_READ); /* count block age extent cache entries */ count += __count_extent_cache(sbi, EX_BLOCK_AGE); /* count clean nat cache entries */ count += __count_nat_entries(sbi); /* count free nids cache entries */ count += __count_free_nids(sbi); spin_lock(&f2fs_list_lock); p = p->next; mutex_unlock(&sbi->umount_mutex); } spin_unlock(&f2fs_list_lock); return count ?: SHRINK_EMPTY; } unsigned long f2fs_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { unsigned long nr = sc->nr_to_scan; struct f2fs_sb_info *sbi; struct list_head *p; unsigned int run_no; unsigned long freed = 0; spin_lock(&f2fs_list_lock); do { run_no = ++shrinker_run_no; } while (run_no == 0); p = f2fs_list.next; while (p != &f2fs_list) { sbi = list_entry(p, struct f2fs_sb_info, s_list); if (sbi->shrinker_run_no == run_no) break; /* stop f2fs_put_super */ if (!mutex_trylock(&sbi->umount_mutex)) { p = p->next; continue; } spin_unlock(&f2fs_list_lock); sbi->shrinker_run_no = run_no; /* shrink block age extent cache entries */ freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2); /* shrink read extent cache entries */ freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2); /* shrink clean nat cache entries */ if (freed < nr) freed += f2fs_try_to_free_nats(sbi, nr - freed); /* shrink free nids cache 
entries */ if (freed < nr) freed += f2fs_try_to_free_nids(sbi, nr - freed); spin_lock(&f2fs_list_lock); p = p->next; list_move_tail(&sbi->s_list, &f2fs_list); mutex_unlock(&sbi->umount_mutex); if (freed >= nr) break; } spin_unlock(&f2fs_list_lock); return freed; } unsigned int f2fs_donate_files(void) { struct f2fs_sb_info *sbi; struct list_head *p; unsigned int donate_files = 0; spin_lock(&f2fs_list_lock); p = f2fs_list.next; while (p != &f2fs_list) { sbi = list_entry(p, struct f2fs_sb_info, s_list); /* stop f2fs_put_super */ if (!mutex_trylock(&sbi->umount_mutex)) { p = p->next; continue; } spin_unlock(&f2fs_list_lock); donate_files += sbi->donate_files; spin_lock(&f2fs_list_lock); p = p->next; mutex_unlock(&sbi->umount_mutex); } spin_unlock(&f2fs_list_lock); return donate_files; } static unsigned int do_reclaim_caches(struct f2fs_sb_info *sbi, unsigned int reclaim_caches_kb) { struct inode *inode; struct f2fs_inode_info *fi; unsigned int nfiles = sbi->donate_files; pgoff_t npages = reclaim_caches_kb >> (PAGE_SHIFT - 10); while (npages && nfiles--) { pgoff_t len; spin_lock(&sbi->inode_lock[DONATE_INODE]); if (list_empty(&sbi->inode_list[DONATE_INODE])) { spin_unlock(&sbi->inode_lock[DONATE_INODE]); break; } fi = list_first_entry(&sbi->inode_list[DONATE_INODE], struct f2fs_inode_info, gdonate_list); list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]); inode = igrab(&fi->vfs_inode); spin_unlock(&sbi->inode_lock[DONATE_INODE]); if (!inode) continue; inode_lock(inode); if (!is_inode_flag_set(inode, FI_DONATE_FINISHED)) { len = fi->donate_end - fi->donate_start + 1; npages = npages < len ? 0 : npages - len; invalidate_inode_pages2_range(inode->i_mapping, fi->donate_start, fi->donate_end); set_inode_flag(inode, FI_DONATE_FINISHED); } inode_unlock(inode); iput(inode); cond_resched(); } return npages << (PAGE_SHIFT - 10); } void f2fs_reclaim_caches(unsigned int reclaim_caches_kb) { struct f2fs_sb_info *sbi; struct list_head *p; spin_lock(&f2fs_list_lock); p = f2fs_list.next; while (p != &f2fs_list && reclaim_caches_kb) { sbi = list_entry(p, struct f2fs_sb_info, s_list); /* stop f2fs_put_super */ if (!mutex_trylock(&sbi->umount_mutex)) { p = p->next; continue; } spin_unlock(&f2fs_list_lock); reclaim_caches_kb = do_reclaim_caches(sbi, reclaim_caches_kb); spin_lock(&f2fs_list_lock); p = p->next; mutex_unlock(&sbi->umount_mutex); } spin_unlock(&f2fs_list_lock); } void f2fs_join_shrinker(struct f2fs_sb_info *sbi) { spin_lock(&f2fs_list_lock); list_add_tail(&sbi->s_list, &f2fs_list); spin_unlock(&f2fs_list_lock); } void f2fs_leave_shrinker(struct f2fs_sb_info *sbi) { f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ)); f2fs_shrink_age_extent_tree(sbi, __count_extent_cache(sbi, EX_BLOCK_AGE)); spin_lock(&f2fs_list_lock); list_del_init(&sbi->s_list); spin_unlock(&f2fs_list_lock); }
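Nothing in this file registers the shrinker itself; that wiring lives in f2fs's superblock code. A rough sketch of how f2fs_shrink_count() and f2fs_shrink_scan() are hooked into the MM shrinker core, assuming the shrinker_alloc() API of recent kernels (v6.7+; older kernels used a static struct shrinker with register_shrinker(), so details vary by version):

static struct shrinker *f2fs_shrinker_info;

static int __init f2fs_init_shrinker(void)
{
	/* Allocate and publish one global f2fs shrinker; every mounted
	 * instance joins f2fs_list via f2fs_join_shrinker(), which is why
	 * the count/scan callbacks above walk the list under
	 * f2fs_list_lock. */
	f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
	if (!f2fs_shrinker_info)
		return -ENOMEM;

	f2fs_shrinker_info->count_objects = f2fs_shrink_count;
	f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;

	shrinker_register(f2fs_shrinker_info);
	return 0;
}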
// SPDX-License-Identifier: GPL-2.0-only /* * HID raw devices, giving access to raw HID events. * * In comparison to hiddev, this device does not process the * hid events at all (no parsing, no lookups). This lets applications * work on raw hid events as they want, and avoids the need for * transport-specific userspace libhid/libusb libraries. 
* * Copyright (c) 2007-2014 Jiri Kosina */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fs.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/cdev.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/hid.h> #include <linux/mutex.h> #include <linux/sched/signal.h> #include <linux/string.h> #include <linux/hidraw.h> static int hidraw_major; static struct cdev hidraw_cdev; static const struct class hidraw_class = { .name = "hidraw", }; static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; static DECLARE_RWSEM(minors_rwsem); static inline bool hidraw_is_revoked(struct hidraw_list *list) { return list->revoked; } static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hidraw_list *list = file->private_data; int ret = 0, len; DECLARE_WAITQUEUE(wait, current); if (hidraw_is_revoked(list)) return -ENODEV; mutex_lock(&list->read_mutex); while (ret == 0) { if (list->head == list->tail) { add_wait_queue(&list->hidraw->wait, &wait); set_current_state(TASK_INTERRUPTIBLE); while (list->head == list->tail) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!list->hidraw->exist) { ret = -EIO; break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } /* allow O_NONBLOCK to work well from other threads */ mutex_unlock(&list->read_mutex); schedule(); mutex_lock(&list->read_mutex); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&list->hidraw->wait, &wait); } if (ret) goto out; len = list->buffer[list->tail].len > count ? count : list->buffer[list->tail].len; if (list->buffer[list->tail].value) { if (copy_to_user(buffer, list->buffer[list->tail].value, len)) { ret = -EFAULT; goto out; } ret = len; } kfree(list->buffer[list->tail].value); list->buffer[list->tail].value = NULL; list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); } out: mutex_unlock(&list->read_mutex); return ret; } /* * The first byte of the report buffer is expected to be a report number. */ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file_inode(file)); struct hid_device *dev; __u8 *buf; int ret = 0; lockdep_assert_held(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; } dev = hidraw_table[minor]->hid; if (count > HID_MAX_BUFFER_SIZE) { hid_warn(dev, "pid %d passed too large report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } if (count < 2) { hid_warn(dev, "pid %d passed too short report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } buf = memdup_user(buffer, count); if (IS_ERR(buf)) { ret = PTR_ERR(buf); goto out; } if ((report_type == HID_OUTPUT_REPORT) && !(dev->quirks & HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP)) { ret = __hid_hw_output_report(dev, buf, count, (u64)(long)file, false); /* * compatibility with old implementation of USB-HID and I2C-HID: * if the device does not support receiving output reports, * on an interrupt endpoint, fallback to SET_REPORT HID command. 
*/ if (ret != -ENOSYS) goto out_free; } ret = __hid_hw_raw_request(dev, buf[0], buf, count, report_type, HID_REQ_SET_REPORT, (u64)(long)file, false); out_free: kfree(buf); out: return ret; } static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct hidraw_list *list = file->private_data; ssize_t ret; down_read(&minors_rwsem); if (hidraw_is_revoked(list)) ret = -ENODEV; else ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT); up_read(&minors_rwsem); return ret; } /* * This function performs a Get_Report transfer over the control endpoint * per section 7.2.1 of the HID specification, version 1.1. The first byte * of buffer is the report number to request, or 0x0 if the device does not * use numbered reports. The report_type parameter can be HID_FEATURE_REPORT * or HID_INPUT_REPORT. */ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file_inode(file)); struct hid_device *dev; __u8 *buf; int ret = 0, len; unsigned char report_number; lockdep_assert_held(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; } dev = hidraw_table[minor]->hid; if (!dev->ll_driver->raw_request) { ret = -ENODEV; goto out; } if (count > HID_MAX_BUFFER_SIZE) { hid_warn(dev, "pid %d passed too large report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } if (count < 2) { hid_warn(dev, "pid %d passed too short report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } buf = kmalloc(count, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out; } /* * Read the first byte from the user. This is the report number, * which is passed to hid_hw_raw_request(). */ if (copy_from_user(&report_number, buffer, 1)) { ret = -EFAULT; goto out_free; } ret = __hid_hw_raw_request(dev, report_number, buf, count, report_type, HID_REQ_GET_REPORT, (u64)(long)file, false); if (ret < 0) goto out_free; len = (ret < count) ? ret : count; if (copy_to_user(buffer, buf, len)) { ret = -EFAULT; goto out_free; } ret = len; out_free: kfree(buf); out: return ret; } static __poll_t hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */ poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) mask |= EPOLLIN | EPOLLRDNORM; if (!list->hidraw->exist || hidraw_is_revoked(list)) mask |= EPOLLERR | EPOLLHUP; return mask; } static int hidraw_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct hidraw *dev; struct hidraw_list *list; unsigned long flags; int err = 0; if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) { err = -ENOMEM; goto out; } /* * Technically not writing to the hidraw_table but a write lock is * required to protect the device refcount. This is symmetrical to * hidraw_release(). 
*/ down_write(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { err = -ENODEV; goto out_unlock; } dev = hidraw_table[minor]; if (!dev->open++) { err = hid_hw_power(dev->hid, PM_HINT_FULLON); if (err < 0) { dev->open--; goto out_unlock; } err = hid_hw_open(dev->hid); if (err < 0) { hid_hw_power(dev->hid, PM_HINT_NORMAL); dev->open--; goto out_unlock; } } list->hidraw = hidraw_table[minor]; mutex_init(&list->read_mutex); spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); list_add_tail(&list->node, &hidraw_table[minor]->list); spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); file->private_data = list; out_unlock: up_write(&minors_rwsem); out: if (err < 0) kfree(list); return err; } static int hidraw_fasync(int fd, struct file *file, int on) { struct hidraw_list *list = file->private_data; if (hidraw_is_revoked(list)) return -ENODEV; return fasync_helper(fd, file, on, &list->fasync); } static void drop_ref(struct hidraw *hidraw, int exists_bit) { if (exists_bit) { hidraw->exist = 0; if (hidraw->open) { hid_hw_close(hidraw->hid); wake_up_interruptible(&hidraw->wait); } device_destroy(&hidraw_class, MKDEV(hidraw_major, hidraw->minor)); } else { --hidraw->open; } if (!hidraw->open) { if (!hidraw->exist) { hidraw_table[hidraw->minor] = NULL; kfree(hidraw); } else { /* close device for last reader */ hid_hw_close(hidraw->hid); hid_hw_power(hidraw->hid, PM_HINT_NORMAL); } } } static int hidraw_release(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct hidraw_list *list = file->private_data; unsigned long flags; down_write(&minors_rwsem); spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); while (list->tail != list->head) { kfree(list->buffer[list->tail].value); list->buffer[list->tail].value = NULL; list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); } list_del(&list->node); spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); kfree(list); drop_ref(hidraw_table[minor], 0); up_write(&minors_rwsem); return 0; } static int hidraw_revoke(struct hidraw_list *list) { list->revoked = true; return 0; } static long hidraw_fixed_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, void __user *arg) { struct hid_device *hid = dev->hid; switch (cmd) { case HIDIOCGRDESCSIZE: if (put_user(hid->rsize, (int __user *)arg)) return -EFAULT; break; case HIDIOCGRDESC: { __u32 len; if (get_user(len, (int __user *)arg)) return -EFAULT; if (len > HID_MAX_DESCRIPTOR_SIZE - 1) return -EINVAL; if (copy_to_user(arg + offsetof( struct hidraw_report_descriptor, value[0]), hid->rdesc, min(hid->rsize, len))) return -EFAULT; break; } case HIDIOCGRAWINFO: { struct hidraw_devinfo dinfo; dinfo.bustype = hid->bus; dinfo.vendor = hid->vendor; dinfo.product = hid->product; if (copy_to_user(arg, &dinfo, sizeof(dinfo))) return -EFAULT; break; } case HIDIOCREVOKE: { struct hidraw_list *list = file->private_data; if (arg) return -EINVAL; return hidraw_revoke(list); } default: /* * None of the above ioctls can return -EAGAIN, so * use it as a marker that we need to check variable * length ioctls. 
*/ return -EAGAIN; } return 0; } static long hidraw_rw_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, void __user *user_arg) { int len = _IOC_SIZE(cmd); switch (cmd & ~IOCSIZE_MASK) { case HIDIOCSFEATURE(0): return hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); case HIDIOCGFEATURE(0): return hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); case HIDIOCSINPUT(0): return hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT); case HIDIOCGINPUT(0): return hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT); case HIDIOCSOUTPUT(0): return hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT); case HIDIOCGOUTPUT(0): return hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT); } return -EINVAL; } static long hidraw_ro_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd, void __user *user_arg) { struct hid_device *hid = dev->hid; int len = _IOC_SIZE(cmd); int field_len; switch (cmd & ~IOCSIZE_MASK) { case HIDIOCGRAWNAME(0): field_len = strlen(hid->name) + 1; if (len > field_len) len = field_len; return copy_to_user(user_arg, hid->name, len) ? -EFAULT : len; case HIDIOCGRAWPHYS(0): field_len = strlen(hid->phys) + 1; if (len > field_len) len = field_len; return copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len; case HIDIOCGRAWUNIQ(0): field_len = strlen(hid->uniq) + 1; if (len > field_len) len = field_len; return copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len; } return -EINVAL; } static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); unsigned int minor = iminor(inode); struct hidraw *dev; struct hidraw_list *list = file->private_data; void __user *user_arg = (void __user *)arg; int ret; down_read(&minors_rwsem); dev = hidraw_table[minor]; if (!dev || !dev->exist || hidraw_is_revoked(list)) { ret = -ENODEV; goto out; } if (_IOC_TYPE(cmd) != 'H') { ret = -EINVAL; goto out; } if (_IOC_NR(cmd) > HIDIOCTL_LAST || _IOC_NR(cmd) == 0) { ret = -ENOTTY; goto out; } ret = hidraw_fixed_size_ioctl(file, dev, cmd, user_arg); if (ret != -EAGAIN) goto out; switch (_IOC_DIR(cmd)) { case (_IOC_READ | _IOC_WRITE): ret = hidraw_rw_variable_size_ioctl(file, dev, cmd, user_arg); break; case _IOC_READ: ret = hidraw_ro_variable_size_ioctl(file, dev, cmd, user_arg); break; default: /* Any other IOC_DIR is wrong */ ret = -EINVAL; } out: up_read(&minors_rwsem); return ret; } static const struct file_operations hidraw_ops = { .owner = THIS_MODULE, .read = hidraw_read, .write = hidraw_write, .poll = hidraw_poll, .open = hidraw_open, .release = hidraw_release, .unlocked_ioctl = hidraw_ioctl, .fasync = hidraw_fasync, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; int hidraw_report_event(struct hid_device *hid, u8 *data, int len) { struct hidraw *dev = hid->hidraw; struct hidraw_list *list; int ret = 0; unsigned long flags; spin_lock_irqsave(&dev->list_lock, flags); list_for_each_entry(list, &dev->list, node) { int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1); if (hidraw_is_revoked(list) || new_head == list->tail) continue; if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) { ret = -ENOMEM; break; } list->buffer[list->head].len = len; list->head = new_head; kill_fasync(&list->fasync, SIGIO, POLL_IN); } spin_unlock_irqrestore(&dev->list_lock, flags); wake_up_interruptible(&dev->wait); return ret; } EXPORT_SYMBOL_GPL(hidraw_report_event); int hidraw_connect(struct hid_device *hid) { int minor, result; struct hidraw 
*dev; /* we accept any HID device, all applications */ dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL); if (!dev) return -ENOMEM; result = -EINVAL; down_write(&minors_rwsem); for (minor = 0; minor < HIDRAW_MAX_DEVICES; minor++) { if (hidraw_table[minor]) continue; hidraw_table[minor] = dev; result = 0; break; } if (result) { up_write(&minors_rwsem); kfree(dev); goto out; } dev->dev = device_create(&hidraw_class, &hid->dev, MKDEV(hidraw_major, minor), NULL, "%s%d", "hidraw", minor); if (IS_ERR(dev->dev)) { hidraw_table[minor] = NULL; up_write(&minors_rwsem); result = PTR_ERR(dev->dev); kfree(dev); goto out; } init_waitqueue_head(&dev->wait); spin_lock_init(&dev->list_lock); INIT_LIST_HEAD(&dev->list); dev->hid = hid; dev->minor = minor; dev->exist = 1; hid->hidraw = dev; up_write(&minors_rwsem); out: return result; } EXPORT_SYMBOL_GPL(hidraw_connect); void hidraw_disconnect(struct hid_device *hid) { struct hidraw *hidraw = hid->hidraw; down_write(&minors_rwsem); drop_ref(hidraw, 1); up_write(&minors_rwsem); } EXPORT_SYMBOL_GPL(hidraw_disconnect); int __init hidraw_init(void) { int result; dev_t dev_id; result = alloc_chrdev_region(&dev_id, HIDRAW_FIRST_MINOR, HIDRAW_MAX_DEVICES, "hidraw"); if (result < 0) { pr_warn("can't get major number\n"); goto out; } hidraw_major = MAJOR(dev_id); result = class_register(&hidraw_class); if (result) goto error_cdev; cdev_init(&hidraw_cdev, &hidraw_ops); result = cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES); if (result < 0) goto error_class; pr_info("raw HID events driver (C) Jiri Kosina\n"); out: return result; error_class: class_unregister(&hidraw_class); error_cdev: unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); goto out; } void hidraw_exit(void) { dev_t dev_id = MKDEV(hidraw_major, 0); cdev_del(&hidraw_cdev); class_unregister(&hidraw_class); unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); }
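/*
 * Example: a minimal userspace sketch (not part of the driver) exercising the
 * variable-size ioctls handled above. The buffer length is encoded into the
 * ioctl number itself by HIDIOCGRAWNAME(len)/HIDIOCGFEATURE(len), which is why
 * hidraw_ioctl() masks off IOCSIZE_MASK before dispatching. The device path
 * and the feature report ID 0x01 are assumptions for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	char name[256];
	unsigned char buf[64];
	int fd = open("/dev/hidraw0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Copies at most sizeof(name) bytes, NUL terminator included. */
	if (ioctl(fd, HIDIOCGRAWNAME(sizeof(name)), name) >= 0)
		printf("device: %s\n", name);

	/* buf[0] must hold the report number before a GET_FEATURE request. */
	memset(buf, 0, sizeof(buf));
	buf[0] = 0x01;
	if (ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf) < 0)
		perror("HIDIOCGFEATURE");

	close(fd);
	return 0;
}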
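/*
 * Example: a companion sketch (also illustrative, not from the driver) for the
 * read side. Each report queued by hidraw_report_event() above is consumed by
 * exactly one read(); poll() sleeps on dev->wait until
 * wake_up_interruptible() signals new data. /dev/hidraw0 is again an assumed
 * path.
 */
#include <poll.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char report[64];
	struct pollfd pfd = { .events = POLLIN };
	ssize_t n;

	pfd.fd = open("/dev/hidraw0", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}

	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		/* One queued report per read(); a short buffer truncates it. */
		n = read(pfd.fd, report, sizeof(report));
		if (n <= 0)
			break;
		printf("report: %zd bytes, first byte 0x%02x\n", n, report[0]);
	}
	close(pfd.fd);
	return 0;
}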
// SPDX-License-Identifier: GPL-2.0-only /* * Kernel-based Virtual Machine driver for Linux * cpuid support routines * * derived from arch/x86/kvm/x86.c * * Copyright 2011 Red Hat, Inc. and/or its affiliates.
* Copyright IBM Corporation, 2008 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kvm_host.h> #include "linux/lockdep.h" #include <linux/export.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> #include <linux/sched/stat.h> #include <asm/processor.h> #include <asm/user.h> #include <asm/fpu/xstate.h> #include <asm/sgx.h> #include <asm/cpuid/api.h> #include "cpuid.h" #include "lapic.h" #include "mmu.h" #include "trace.h" #include "pmu.h" #include "xen.h" /* * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be * aligned to sizeof(unsigned long) because it's not accessed via bitops. */ u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_caps); struct cpuid_xstate_sizes { u32 eax; u32 ebx; u32 ecx; }; static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init; void __init kvm_init_xstate_sizes(void) { u32 ign; int i; for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) { struct cpuid_xstate_sizes *xs = &xstate_sizes[i]; cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign); } } u32 xstate_required_size(u64 xstate_bv, bool compacted) { u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; int i; xstate_bv &= XFEATURE_MASK_EXTEND; for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes) && xstate_bv; i++) { struct cpuid_xstate_sizes *xs = &xstate_sizes[i]; u32 offset; if (!(xstate_bv & BIT_ULL(i))) continue; /* ECX[1]: 64B alignment in compacted form */ if (compacted) offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret; else offset = xs->ebx; ret = max(ret, offset + xs->eax); xstate_bv &= ~BIT_ULL(i); } return ret; } struct kvm_cpuid_entry2 *kvm_find_cpuid_entry2( struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index) { struct kvm_cpuid_entry2 *e; int i; /* * KVM has a semi-arbitrary rule that querying the guest's CPUID model * with IRQs disabled is disallowed. The CPUID model can legitimately * have over one hundred entries, i.e. the lookup is slow, and IRQs are * typically disabled in KVM only when KVM is in a performance critical * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break * if this rule is violated, this assertion is purely to flag potential * performance issues. If this fires, consider moving the lookup out * of the hotpath, e.g. by caching information during CPUID updates. */ lockdep_assert_irqs_enabled(); for (i = 0; i < nent; i++) { e = &entries[i]; if (e->function != function) continue; /* * If the index isn't significant, use the first entry with a * matching function. It's userspace's responsibility to not * provide "duplicate" entries in all cases. */ if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index) return e; /* * Similarly, use the first matching entry if KVM is doing a * lookup (as opposed to emulating CPUID) for a function that's * architecturally defined as not having a significant index. */ if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) { /* * Direct lookups from KVM should not diverge from what * KVM defines internally (the architectural behavior). */ WARN_ON_ONCE(cpuid_function_is_indexed(function)); return e; } } return NULL; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_cpuid_entry2); static int kvm_check_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; u64 xfeatures; /* * The existing code assumes virtual address is 48-bit or 57-bit in the * canonical address checks; exit if it is ever changed. 
 */ best = kvm_find_cpuid_entry(vcpu, 0x80000008); if (best) { int vaddr_bits = (best->eax & 0xff00) >> 8; if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) return -EINVAL; } /* * Exposing dynamic xfeatures to the guest requires additional * enabling in the FPU, e.g. to expand the guest XSAVE state size. */ best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0); if (!best) return 0; xfeatures = best->eax | ((u64)best->edx << 32); xfeatures &= XFEATURE_MASK_USER_DYNAMIC; if (!xfeatures) return 0; return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures); } static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu); static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu); /* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, int nent) { struct kvm_cpuid_entry2 *orig; int i; /* * Apply runtime CPUID updates to the incoming CPUID entries to avoid * false positives due to mismatches on KVM-owned feature flags. * * Note! @e2 and @nent track the _old_ CPUID entries! */ kvm_update_cpuid_runtime(vcpu); kvm_apply_cpuid_pv_features_quirk(vcpu); if (nent != vcpu->arch.cpuid_nent) return -EINVAL; for (i = 0; i < nent; i++) { orig = &vcpu->arch.cpuid_entries[i]; if (e2[i].function != orig->function || e2[i].index != orig->index || e2[i].flags != orig->flags || e2[i].eax != orig->eax || e2[i].ebx != orig->ebx || e2[i].ecx != orig->ecx || e2[i].edx != orig->edx) return -EINVAL; } return 0; } static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu, const char *sig) { struct kvm_hypervisor_cpuid cpuid = {}; struct kvm_cpuid_entry2 *entry; u32 base; for_each_possible_cpuid_base_hypervisor(base) { entry = kvm_find_cpuid_entry(vcpu, base); if (entry) { u32 signature[3]; signature[0] = entry->ebx; signature[1] = entry->ecx; signature[2] = entry->edx; if (!memcmp(signature, sig, sizeof(signature))) { cpuid.base = base; cpuid.limit = entry->eax; break; } } } return cpuid; } static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu) { struct kvm_hypervisor_cpuid kvm_cpuid; struct kvm_cpuid_entry2 *best; kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE); if (!kvm_cpuid.base) return 0; best = kvm_find_cpuid_entry(vcpu, kvm_cpuid.base | KVM_CPUID_FEATURES); if (!best) return 0; if (kvm_hlt_in_guest(vcpu->kvm)) best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT); return best->eax; } /* * Calculate guest's supported XCR0 taking into account guest CPUID data and * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
*/ static u64 cpuid_get_supported_xcr0(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0); if (!best) return 0; return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0; } static u64 cpuid_get_supported_xss(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry_index(vcpu, 0xd, 1); if (!best) return 0; return (best->ecx | ((u64)best->edx << 32)) & kvm_caps.supported_xss; } static __always_inline void kvm_update_feature_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entry, unsigned int x86_feature, bool has_feature) { cpuid_entry_change(entry, x86_feature, has_feature); guest_cpu_cap_change(vcpu, x86_feature, has_feature); } static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; vcpu->arch.cpuid_dynamic_bits_dirty = false; best = kvm_find_cpuid_entry(vcpu, 1); if (best) { kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSXSAVE, kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)); kvm_update_feature_runtime(vcpu, best, X86_FEATURE_APIC, vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE); if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) kvm_update_feature_runtime(vcpu, best, X86_FEATURE_MWAIT, vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT); } best = kvm_find_cpuid_entry_index(vcpu, 7, 0); if (best) kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSPKE, kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)); best = kvm_find_cpuid_entry_index(vcpu, 0xD, 0); if (best) best->ebx = xstate_required_size(vcpu->arch.xcr0, false); best = kvm_find_cpuid_entry_index(vcpu, 0xD, 1); if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) || cpuid_entry_has(best, X86_FEATURE_XSAVEC))) best->ebx = xstate_required_size(vcpu->arch.xcr0 | vcpu->arch.ia32_xss, true); } static bool kvm_cpuid_has_hyperv(struct kvm_vcpu *vcpu) { #ifdef CONFIG_KVM_HYPERV struct kvm_cpuid_entry2 *entry; entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE); return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX; #else return false; #endif } static bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *entry; entry = kvm_find_cpuid_entry(vcpu, 0); if (!entry) return false; return is_guest_vendor_amd(entry->ebx, entry->ecx, entry->edx) || is_guest_vendor_hygon(entry->ebx, entry->ecx, entry->edx); } /* * This isn't truly "unsafe", but except for the cpu_caps initialization code, * all register lookups should use __cpuid_entry_get_reg(), which provides * compile-time validation of the input. */ static u32 cpuid_get_reg_unsafe(struct kvm_cpuid_entry2 *entry, u32 reg) { switch (reg) { case CPUID_EAX: return entry->eax; case CPUID_EBX: return entry->ebx; case CPUID_ECX: return entry->ecx; case CPUID_EDX: return entry->edx; default: WARN_ON_ONCE(1); return 0; } } static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func, bool include_partially_emulated); void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_cpuid_entry2 *best; struct kvm_cpuid_entry2 *entry; bool allow_gbpages; int i; memset(vcpu->arch.cpu_caps, 0, sizeof(vcpu->arch.cpu_caps)); BUILD_BUG_ON(ARRAY_SIZE(reverse_cpuid) != NR_KVM_CPU_CAPS); /* * Reset guest capabilities to userspace's guest CPUID definition, i.e. * honor userspace's definition for features that don't require KVM or * hardware management/support (or that KVM simply doesn't care about). 
 */ for (i = 0; i < NR_KVM_CPU_CAPS; i++) { const struct cpuid_reg cpuid = reverse_cpuid[i]; struct kvm_cpuid_entry2 emulated; if (!cpuid.function) continue; entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index); if (!entry) continue; cpuid_func_emulated(&emulated, cpuid.function, true); /* * A vCPU has a feature if it's supported by KVM and is enabled * in guest CPUID. Note, this includes features that are * supported by KVM but aren't advertised to userspace! */ vcpu->arch.cpu_caps[i] = kvm_cpu_caps[i] | cpuid_get_reg_unsafe(&emulated, cpuid.reg); vcpu->arch.cpu_caps[i] &= cpuid_get_reg_unsafe(entry, cpuid.reg); } kvm_update_cpuid_runtime(vcpu); /* * If TDP is enabled, let the guest use GBPAGES if they're supported in * hardware. The hardware page walker doesn't let KVM disable GBPAGES, * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA * walk for performance and complexity reasons. Not to mention KVM * _can't_ solve the problem because GVA->GPA walks aren't visible to * KVM once a TDP translation is installed. Mimic hardware behavior so * that KVM's behavior is at least consistent, i.e. doesn't randomly inject #PF. * If TDP is disabled, honor *only* guest CPUID as KVM has full control * and can install smaller shadow pages if the host lacks 1GiB support. */ allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) : guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES); guest_cpu_cap_change(vcpu, X86_FEATURE_GBPAGES, allow_gbpages); best = kvm_find_cpuid_entry(vcpu, 1); if (best && apic) { if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER)) apic->lapic_timer.timer_mode_mask = 3 << 17; else apic->lapic_timer.timer_mode_mask = 1 << 17; kvm_apic_set_version(vcpu); } vcpu->arch.guest_supported_xcr0 = cpuid_get_supported_xcr0(vcpu); vcpu->arch.guest_supported_xss = cpuid_get_supported_xss(vcpu); vcpu->arch.pv_cpuid.features = kvm_apply_cpuid_pv_features_quirk(vcpu); vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); kvm_pmu_refresh(vcpu); #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_) | __cr4_reserved_bits(guest_cpu_cap_has, vcpu); #undef __kvm_cpu_cap_has kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu)); /* Invoke the vendor callback only after the above state is updated. */ kvm_x86_call(vcpu_after_set_cpuid)(vcpu); /* * Except for the MMU, which needs to do its thing after any vendor specific * adjustments to the reserved GPA bits. */ kvm_mmu_after_set_cpuid(vcpu); kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu); } int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 0x80000000); if (!best || best->eax < 0x80000008) goto not_found; best = kvm_find_cpuid_entry(vcpu, 0x80000008); if (best) return best->eax & 0xff; not_found: return 36; } int cpuid_query_maxguestphyaddr(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 0x80000000); if (!best || best->eax < 0x80000008) goto not_found; best = kvm_find_cpuid_entry(vcpu, 0x80000008); if (best) return (best->eax >> 16) & 0xff; not_found: return 0; } /* * This "raw" version returns the reserved GPA bits without any adjustments for * encryption technologies that usurp bits. The raw mask should be used if and * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
*/ u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu) { return rsvd_bits(cpuid_maxphyaddr(vcpu), 63); } static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, int nent) { u32 vcpu_caps[NR_KVM_CPU_CAPS]; int r; /* * Swap the existing (old) entries with the incoming (new) entries in * order to massage the new entries, e.g. to account for dynamic bits * that KVM controls, without clobbering the current guest CPUID, which * KVM needs to preserve in order to unwind on failure. * * Similarly, save the vCPU's current cpu_caps so that the capabilities * can be updated alongside the CPUID entries when performing runtime * updates. Full initialization is done if and only if the vCPU hasn't * run, i.e. only if userspace is potentially changing CPUID features. */ swap(vcpu->arch.cpuid_entries, e2); swap(vcpu->arch.cpuid_nent, nent); memcpy(vcpu_caps, vcpu->arch.cpu_caps, sizeof(vcpu_caps)); BUILD_BUG_ON(sizeof(vcpu_caps) != sizeof(vcpu->arch.cpu_caps)); /* * KVM does not correctly handle changing guest CPUID after KVM_RUN, as * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with * the core vCPU model on the fly. It would've been better to forbid any * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do * KVM_SET_CPUID{,2} again. To support this legacy behavior, check * whether the supplied CPUID data is equal to what's already set. */ if (kvm_vcpu_has_run(vcpu)) { r = kvm_cpuid_check_equal(vcpu, e2, nent); if (r) goto err; goto success; } #ifdef CONFIG_KVM_HYPERV if (kvm_cpuid_has_hyperv(vcpu)) { r = kvm_hv_vcpu_init(vcpu); if (r) goto err; } #endif r = kvm_check_cpuid(vcpu); if (r) goto err; #ifdef CONFIG_KVM_XEN vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE); #endif kvm_vcpu_after_set_cpuid(vcpu); success: kvfree(e2); return 0; err: memcpy(vcpu->arch.cpu_caps, vcpu_caps, sizeof(vcpu_caps)); swap(vcpu->arch.cpuid_entries, e2); swap(vcpu->arch.cpuid_nent, nent); return r; } /* when an old userspace process fills a new kernel module */ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid *cpuid, struct kvm_cpuid_entry __user *entries) { int r, i; struct kvm_cpuid_entry *e = NULL; struct kvm_cpuid_entry2 *e2 = NULL; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) return -E2BIG; if (cpuid->nent) { e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e)); if (IS_ERR(e)) return PTR_ERR(e); e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT); if (!e2) { r = -ENOMEM; goto out_free_cpuid; } } for (i = 0; i < cpuid->nent; i++) { e2[i].function = e[i].function; e2[i].eax = e[i].eax; e2[i].ebx = e[i].ebx; e2[i].ecx = e[i].ecx; e2[i].edx = e[i].edx; e2[i].index = 0; e2[i].flags = 0; e2[i].padding[0] = 0; e2[i].padding[1] = 0; e2[i].padding[2] = 0; } r = kvm_set_cpuid(vcpu, e2, cpuid->nent); if (r) kvfree(e2); out_free_cpuid: kvfree(e); return r; } int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { struct kvm_cpuid_entry2 *e2 = NULL; int r; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) return -E2BIG; if (cpuid->nent) { e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2)); if (IS_ERR(e2)) return PTR_ERR(e2); } r = kvm_set_cpuid(vcpu, e2, cpuid->nent); if (r) kvfree(e2); return r; } int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, 
struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { if (cpuid->nent < vcpu->arch.cpuid_nent) return -E2BIG; if (vcpu->arch.cpuid_dynamic_bits_dirty) kvm_update_cpuid_runtime(vcpu); if (copy_to_user(entries, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) return -EFAULT; cpuid->nent = vcpu->arch.cpuid_nent; return 0; } static __always_inline u32 raw_cpuid_get(struct cpuid_reg cpuid) { struct kvm_cpuid_entry2 entry; u32 base; /* * KVM only supports features defined by Intel (0x0), AMD (0x80000000), * and Centaur (0xc0000000). WARN if a feature for new vendor base is * defined, as this and other code would need to be updated. */ base = cpuid.function & 0xffff0000; if (WARN_ON_ONCE(base && base != 0x80000000 && base != 0xc0000000)) return 0; if (cpuid_eax(base) < cpuid.function) return 0; cpuid_count(cpuid.function, cpuid.index, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx); return *__cpuid_entry_get_reg(&entry, cpuid.reg); } /* * For kernel-defined leafs, mask KVM's supported feature set with the kernel's * capabilities as well as raw CPUID. For KVM-defined leafs, consult only raw * CPUID, as KVM is the one and only authority (in the kernel). */ #define kvm_cpu_cap_init(leaf, feature_initializers...) \ do { \ const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32); \ const u32 __maybe_unused kvm_cpu_cap_init_in_progress = leaf; \ const u32 *kernel_cpu_caps = boot_cpu_data.x86_capability; \ u32 kvm_cpu_cap_passthrough = 0; \ u32 kvm_cpu_cap_synthesized = 0; \ u32 kvm_cpu_cap_emulated = 0; \ u32 kvm_cpu_cap_features = 0; \ \ feature_initializers \ \ kvm_cpu_caps[leaf] = kvm_cpu_cap_features; \ \ if (leaf < NCAPINTS) \ kvm_cpu_caps[leaf] &= kernel_cpu_caps[leaf]; \ \ kvm_cpu_caps[leaf] |= kvm_cpu_cap_passthrough; \ kvm_cpu_caps[leaf] &= (raw_cpuid_get(cpuid) | \ kvm_cpu_cap_synthesized); \ kvm_cpu_caps[leaf] |= kvm_cpu_cap_emulated; \ } while (0) /* * Assert that the feature bit being declared, e.g. via F(), is in the CPUID * word that's being initialized. Exempt 0x8000_0001.EDX usage of 0x1.EDX * features, as AMD duplicated many 0x1.EDX features into 0x8000_0001.EDX. */ #define KVM_VALIDATE_CPU_CAP_USAGE(name) \ do { \ u32 __leaf = __feature_leaf(X86_FEATURE_##name); \ \ BUILD_BUG_ON(__leaf != kvm_cpu_cap_init_in_progress); \ } while (0) #define F(name) \ ({ \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ kvm_cpu_cap_features |= feature_bit(name); \ }) /* Scattered Flag - For features that are scattered by cpufeatures.h. */ #define SCATTERED_F(name) \ ({ \ BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ if (boot_cpu_has(X86_FEATURE_##name)) \ F(name); \ }) /* Features that KVM supports only on 64-bit kernels. */ #define X86_64_F(name) \ ({ \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ if (IS_ENABLED(CONFIG_X86_64)) \ F(name); \ }) /* * Emulated Feature - For features that KVM emulates in software irrespective * of host CPU/kernel support. */ #define EMULATED_F(name) \ ({ \ kvm_cpu_cap_emulated |= feature_bit(name); \ F(name); \ }) /* * Synthesized Feature - For features that are synthesized into boot_cpu_data, * i.e. may not be present in the raw CPUID, but can still be advertised to * userspace. Primarily used for mitigation related feature flags. */ #define SYNTHESIZED_F(name) \ ({ \ kvm_cpu_cap_synthesized |= feature_bit(name); \ F(name); \ }) /* * Passthrough Feature - For features that KVM supports based purely on raw * hardware CPUID, i.e. 
that KVM virtualizes even if the host kernel doesn't * use the feature. Simply force set the feature in KVM's capabilities, raw * CPUID support will be factored in by kvm_cpu_cap_mask(). */ #define PASSTHROUGH_F(name) \ ({ \ kvm_cpu_cap_passthrough |= feature_bit(name); \ F(name); \ }) /* * Aliased Features - For features in 0x8000_0001.EDX that are duplicates of * identical 0x1.EDX features, and thus are aliased from 0x1 to 0x8000_0001. */ #define ALIASED_1_EDX_F(name) \ ({ \ BUILD_BUG_ON(__feature_leaf(X86_FEATURE_##name) != CPUID_1_EDX); \ BUILD_BUG_ON(kvm_cpu_cap_init_in_progress != CPUID_8000_0001_EDX); \ kvm_cpu_cap_features |= feature_bit(name); \ }) /* * Vendor Features - For features that KVM supports, but are added in later * because they require additional vendor enabling. */ #define VENDOR_F(name) \ ({ \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ }) /* * Runtime Features - For features that KVM dynamically sets/clears at runtime, * e.g. when CR4 changes, but which are never advertised to userspace. */ #define RUNTIME_F(name) \ ({ \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ }) /* * Undefine the MSR bit macro to avoid token concatenation issues when * processing X86_FEATURE_SPEC_CTRL_SSBD. */ #undef SPEC_CTRL_SSBD /* DS is defined by ptrace-abi.h on 32-bit builds. */ #undef DS void kvm_set_cpu_caps(void) { memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) > sizeof(boot_cpu_data.x86_capability)); kvm_cpu_cap_init(CPUID_1_ECX, F(XMM3), F(PCLMULQDQ), VENDOR_F(DTES64), /* * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not* * advertised to guests via CPUID! MWAIT is also technically a * runtime flag thanks to IA32_MISC_ENABLES; mark it as such so * that KVM is aware that it's a known, unadvertised flag. */ RUNTIME_F(MWAIT), /* DS-CPL */ VENDOR_F(VMX), /* SMX, EST */ /* TM2 */ F(SSSE3), /* CNXT-ID */ /* Reserved */ F(FMA), F(CX16), /* xTPR Update */ F(PDCM), F(PCID), /* Reserved, DCA */ F(XMM4_1), F(XMM4_2), EMULATED_F(X2APIC), F(MOVBE), F(POPCNT), EMULATED_F(TSC_DEADLINE_TIMER), F(AES), F(XSAVE), RUNTIME_F(OSXSAVE), F(AVX), F(F16C), F(RDRAND), EMULATED_F(HYPERVISOR), ); kvm_cpu_cap_init(CPUID_1_EDX, F(FPU), F(VME), F(DE), F(PSE), F(TSC), F(MSR), F(PAE), F(MCE), F(CX8), F(APIC), /* Reserved */ F(SEP), F(MTRR), F(PGE), F(MCA), F(CMOV), F(PAT), F(PSE36), /* PSN */ F(CLFLUSH), /* Reserved */ VENDOR_F(DS), /* ACPI */ F(MMX), F(FXSR), F(XMM), F(XMM2), F(SELFSNOOP), /* HTT, TM, Reserved, PBE */ ); kvm_cpu_cap_init(CPUID_7_0_EBX, F(FSGSBASE), EMULATED_F(TSC_ADJUST), F(SGX), F(BMI1), F(HLE), F(AVX2), F(FDP_EXCPTN_ONLY), F(SMEP), F(BMI2), F(ERMS), F(INVPCID), F(RTM), F(ZERO_FCS_FDS), VENDOR_F(MPX), F(AVX512F), F(AVX512DQ), F(RDSEED), F(ADX), F(SMAP), F(AVX512IFMA), F(CLFLUSHOPT), F(CLWB), VENDOR_F(INTEL_PT), F(AVX512PF), F(AVX512ER), F(AVX512CD), F(SHA_NI), F(AVX512BW), F(AVX512VL), ); kvm_cpu_cap_init(CPUID_7_ECX, F(AVX512VBMI), PASSTHROUGH_F(LA57), F(PKU), RUNTIME_F(OSPKE), F(RDPID), F(AVX512_VPOPCNTDQ), F(UMIP), F(AVX512_VBMI2), F(GFNI), F(VAES), F(VPCLMULQDQ), F(AVX512_VNNI), F(AVX512_BITALG), F(CLDEMOTE), F(MOVDIRI), F(MOVDIR64B), VENDOR_F(WAITPKG), F(SGX_LC), F(BUS_LOCK_DETECT), X86_64_F(SHSTK), ); /* * PKU not yet implemented for shadow paging and requires OSPKE * to be set on the host. Clear it if that is not the case */ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) kvm_cpu_cap_clear(X86_FEATURE_PKU); /* * Shadow Stacks aren't implemented in the Shadow MMU. 
Shadow Stack * accesses require "magic" Writable=0,Dirty=1 protection, which KVM * doesn't know how to emulate or map. */ if (!tdp_enabled) kvm_cpu_cap_clear(X86_FEATURE_SHSTK); kvm_cpu_cap_init(CPUID_7_EDX, F(AVX512_4VNNIW), F(AVX512_4FMAPS), F(SPEC_CTRL), F(SPEC_CTRL_SSBD), EMULATED_F(ARCH_CAPABILITIES), F(INTEL_STIBP), F(MD_CLEAR), F(AVX512_VP2INTERSECT), F(FSRM), F(SERIALIZE), F(TSXLDTRK), F(AVX512_FP16), F(AMX_TILE), F(AMX_INT8), F(AMX_BF16), F(FLUSH_L1D), F(IBT), ); /* * Disable support for IBT and SHSTK if KVM is configured to emulate * accesses to reserved GPAs, as KVM's emulator doesn't support IBT or * SHSTK, nor does KVM handle Shadow Stack #PFs (see above). */ if (allow_smaller_maxphyaddr) { kvm_cpu_cap_clear(X86_FEATURE_SHSTK); kvm_cpu_cap_clear(X86_FEATURE_IBT); } if (boot_cpu_has(X86_FEATURE_AMD_IBPB_RET) && boot_cpu_has(X86_FEATURE_AMD_IBPB) && boot_cpu_has(X86_FEATURE_AMD_IBRS)) kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL); if (boot_cpu_has(X86_FEATURE_STIBP)) kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP); if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD); kvm_cpu_cap_init(CPUID_7_1_EAX, F(SHA512), F(SM3), F(SM4), F(AVX_VNNI), F(AVX512_BF16), F(CMPCCXADD), F(FZRM), F(FSRS), F(FSRC), F(WRMSRNS), X86_64_F(LKGS), F(AMX_FP16), F(AVX_IFMA), F(LAM), ); kvm_cpu_cap_init(CPUID_7_1_ECX, SCATTERED_F(MSR_IMM), ); kvm_cpu_cap_init(CPUID_7_1_EDX, F(AVX_VNNI_INT8), F(AVX_NE_CONVERT), F(AMX_COMPLEX), F(AVX_VNNI_INT16), F(PREFETCHITI), F(AVX10), ); kvm_cpu_cap_init(CPUID_7_2_EDX, F(INTEL_PSFD), F(IPRED_CTRL), F(RRSBA_CTRL), F(DDPD_U), F(BHI_CTRL), F(MCDT_NO), ); kvm_cpu_cap_init(CPUID_D_1_EAX, F(XSAVEOPT), F(XSAVEC), F(XGETBV1), F(XSAVES), X86_64_F(XFD), ); kvm_cpu_cap_init(CPUID_12_EAX, SCATTERED_F(SGX1), SCATTERED_F(SGX2), SCATTERED_F(SGX_EDECCSSA), ); kvm_cpu_cap_init(CPUID_24_0_EBX, F(AVX10_128), F(AVX10_256), F(AVX10_512), ); kvm_cpu_cap_init(CPUID_8000_0001_ECX, F(LAHF_LM), F(CMP_LEGACY), VENDOR_F(SVM), /* ExtApicSpace */ F(CR8_LEGACY), F(ABM), F(SSE4A), F(MISALIGNSSE), F(3DNOWPREFETCH), F(OSVW), /* IBS */ F(XOP), /* SKINIT, WDT, LWP */ F(FMA4), F(TBM), F(TOPOEXT), VENDOR_F(PERFCTR_CORE), ); kvm_cpu_cap_init(CPUID_8000_0001_EDX, ALIASED_1_EDX_F(FPU), ALIASED_1_EDX_F(VME), ALIASED_1_EDX_F(DE), ALIASED_1_EDX_F(PSE), ALIASED_1_EDX_F(TSC), ALIASED_1_EDX_F(MSR), ALIASED_1_EDX_F(PAE), ALIASED_1_EDX_F(MCE), ALIASED_1_EDX_F(CX8), ALIASED_1_EDX_F(APIC), /* Reserved */ F(SYSCALL), ALIASED_1_EDX_F(MTRR), ALIASED_1_EDX_F(PGE), ALIASED_1_EDX_F(MCA), ALIASED_1_EDX_F(CMOV), ALIASED_1_EDX_F(PAT), ALIASED_1_EDX_F(PSE36), /* Reserved */ F(NX), /* Reserved */ F(MMXEXT), ALIASED_1_EDX_F(MMX), ALIASED_1_EDX_F(FXSR), F(FXSR_OPT), X86_64_F(GBPAGES), F(RDTSCP), /* Reserved */ X86_64_F(LM), F(3DNOWEXT), F(3DNOW), ); if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64)) kvm_cpu_cap_set(X86_FEATURE_GBPAGES); kvm_cpu_cap_init(CPUID_8000_0007_EDX, SCATTERED_F(CONSTANT_TSC), ); kvm_cpu_cap_init(CPUID_8000_0008_EBX, F(CLZERO), F(XSAVEERPTR), F(WBNOINVD), F(AMD_IBPB), F(AMD_IBRS), F(AMD_SSBD), F(VIRT_SSBD), F(AMD_SSB_NO), F(AMD_STIBP), F(AMD_STIBP_ALWAYS_ON), F(AMD_IBRS_SAME_MODE), F(AMD_PSFD), F(AMD_IBPB_RET), ); /* * AMD has separate bits for each SPEC_CTRL bit. * arch/x86/kernel/cpu/bugs.c is kind enough to * record that in cpufeatures so use them. 
 */ if (boot_cpu_has(X86_FEATURE_IBPB)) { kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB); if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) && !boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB_RET); } if (boot_cpu_has(X86_FEATURE_IBRS)) kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS); if (boot_cpu_has(X86_FEATURE_STIBP)) kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP); if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD); if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO); /* * The preference is to use the SPEC_CTRL MSR instead of the * VIRT_SPEC MSR. */ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) && !boot_cpu_has(X86_FEATURE_AMD_SSBD)) kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); /* All SVM features require additional vendor module enabling. */ kvm_cpu_cap_init(CPUID_8000_000A_EDX, VENDOR_F(NPT), VENDOR_F(VMCBCLEAN), VENDOR_F(FLUSHBYASID), VENDOR_F(NRIPS), VENDOR_F(TSCRATEMSR), VENDOR_F(V_VMSAVE_VMLOAD), VENDOR_F(LBRV), VENDOR_F(PAUSEFILTER), VENDOR_F(PFTHRESHOLD), VENDOR_F(VGIF), VENDOR_F(VNMI), VENDOR_F(SVME_ADDR_CHK), ); kvm_cpu_cap_init(CPUID_8000_001F_EAX, VENDOR_F(SME), VENDOR_F(SEV), /* VM_PAGE_FLUSH */ VENDOR_F(SEV_ES), F(SME_COHERENT), ); kvm_cpu_cap_init(CPUID_8000_0021_EAX, F(NO_NESTED_DATA_BP), F(WRMSR_XX_BASE_NS), /* * Synthesize "LFENCE is serializing" into the AMD-defined entry * in KVM's supported CPUID, i.e. if the feature is reported as * supported by the kernel. LFENCE_RDTSC was a Linux-defined * synthetic feature long before AMD joined the bandwagon, e.g. * LFENCE is serializing on most CPUs that support SSE2. On * CPUs that don't support AMD's leaf, ANDing with the raw host * CPUID will drop the flags, and reporting support in AMD's * leaf can make it easier for userspace to detect the feature. */ SYNTHESIZED_F(LFENCE_RDTSC), /* SmmPgCfgLock */ /* 4: Resv */ SYNTHESIZED_F(VERW_CLEAR), F(NULL_SEL_CLR_BASE), /* UpperAddressIgnore */ F(AUTOIBRS), F(PREFETCHI), EMULATED_F(NO_SMM_CTL_MSR), /* PrefetchCtlMsr */ /* GpOnUserCpuid */ /* EPSF */ SYNTHESIZED_F(SBPB), SYNTHESIZED_F(IBPB_BRTYPE), SYNTHESIZED_F(SRSO_NO), F(SRSO_USER_KERNEL_NO), ); kvm_cpu_cap_init(CPUID_8000_0021_ECX, SYNTHESIZED_F(TSA_SQ_NO), SYNTHESIZED_F(TSA_L1_NO), ); kvm_cpu_cap_init(CPUID_8000_0022_EAX, F(PERFMON_V2), ); if (!static_cpu_has_bug(X86_BUG_NULL_SEG)) kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE); kvm_cpu_cap_init(CPUID_C000_0001_EDX, F(XSTORE), F(XSTORE_EN), F(XCRYPT), F(XCRYPT_EN), F(ACE2), F(ACE2_EN), F(PHE), F(PHE_EN), F(PMM), F(PMM_EN), ); /* * Hide RDTSCP and RDPID if either feature is reported as supported but * probing MSR_TSC_AUX failed. This is purely a sanity check and * should never happen, but the guest will likely crash if RDTSCP or * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in * the past. For example, the sanity check may fire if this instance of * KVM is running as L1 on top of an older, broken KVM.
*/ if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) || kvm_cpu_cap_has(X86_FEATURE_RDPID)) && !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) { kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); kvm_cpu_cap_clear(X86_FEATURE_RDPID); } } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cpu_caps); #undef F #undef SCATTERED_F #undef X86_64_F #undef EMULATED_F #undef SYNTHESIZED_F #undef PASSTHROUGH_F #undef ALIASED_1_EDX_F #undef VENDOR_F #undef RUNTIME_F struct kvm_cpuid_array { struct kvm_cpuid_entry2 *entries; int maxnent; int nent; }; static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array) { if (array->nent >= array->maxnent) return NULL; return &array->entries[array->nent++]; } static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, u32 function, u32 index) { struct kvm_cpuid_entry2 *entry = get_next_cpuid(array); if (!entry) return NULL; memset(entry, 0, sizeof(*entry)); entry->function = function; entry->index = index; switch (function & 0xC0000000) { case 0x40000000: /* Hypervisor leaves are always synthesized by __do_cpuid_func. */ return entry; case 0x80000000: /* * 0x80000021 is sometimes synthesized by __do_cpuid_func, which * would result in out-of-bounds calls to do_host_cpuid. */ { static int max_cpuid_80000000; if (!READ_ONCE(max_cpuid_80000000)) WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000)); if (function > READ_ONCE(max_cpuid_80000000)) return entry; } break; default: break; } cpuid_count(entry->function, entry->index, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); if (cpuid_function_is_indexed(function)) entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; return entry; } static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func, bool include_partially_emulated) { memset(entry, 0, sizeof(*entry)); entry->function = func; entry->index = 0; entry->flags = 0; switch (func) { case 0: entry->eax = 7; return 1; case 1: entry->ecx = feature_bit(MOVBE); /* * KVM allows userspace to enumerate MONITOR+MWAIT support to * the guest, but the MWAIT feature flag is never advertised * to userspace because MONITOR+MWAIT aren't virtualized by * hardware, can't be faithfully emulated in software (KVM * emulates them as NOPs), and allowing the guest to execute * them natively requires enabling a per-VM capability. */ if (include_partially_emulated) entry->ecx |= feature_bit(MWAIT); return 1; case 7: entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; entry->eax = 0; if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) entry->ecx = feature_bit(RDPID); return 1; default: return 0; } } static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func) { if (array->nent >= array->maxnent) return -E2BIG; array->nent += cpuid_func_emulated(&array->entries[array->nent], func, false); return 0; } static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) { struct kvm_cpuid_entry2 *entry; int r, i, max_idx; /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); r = -E2BIG; entry = do_host_cpuid(array, function, 0); if (!entry) goto out; switch (function) { case 0: /* Limited to the highest leaf implemented in KVM. */ entry->eax = min(entry->eax, 0x24U); break; case 1: cpuid_entry_override(entry, CPUID_1_EDX); cpuid_entry_override(entry, CPUID_1_ECX); break; case 2: /* * On ancient CPUs, function 2 entries are STATEFUL. That is, * CPUID(function=2, index=0) may return different results each * time, with the least-significant byte in EAX enumerating the * number of times software should do CPUID(2, 0). * * Modern CPUs, i.e. 
every CPU KVM has *ever* run on are less * idiotic. Intel's SDM states that EAX & 0xff "will always * return 01H. Software should ignore this value and not * interpret it as an informational descriptor", while AMD's * APM states that CPUID(2) is reserved. * * WARN if a frankenstein CPU that supports virtualization and * a stateful CPUID.0x2 is encountered. */ WARN_ON_ONCE((entry->eax & 0xff) > 1); break; /* functions 4 and 0x8000001d have additional index. */ case 4: case 0x8000001d: /* * Read entries until the cache type in the previous entry is * zero, i.e. indicates an invalid entry. */ for (i = 1; entry->eax & 0x1f; ++i) { entry = do_host_cpuid(array, function, i); if (!entry) goto out; } break; case 6: /* Thermal management */ entry->eax = 0x4; /* allow ARAT */ entry->ebx = 0; entry->ecx = 0; entry->edx = 0; break; /* function 7 has additional index. */ case 7: max_idx = entry->eax = min(entry->eax, 2u); cpuid_entry_override(entry, CPUID_7_0_EBX); cpuid_entry_override(entry, CPUID_7_ECX); cpuid_entry_override(entry, CPUID_7_EDX); /* KVM only supports up to 0x7.2, capped above via min(). */ if (max_idx >= 1) { entry = do_host_cpuid(array, function, 1); if (!entry) goto out; cpuid_entry_override(entry, CPUID_7_1_EAX); cpuid_entry_override(entry, CPUID_7_1_ECX); cpuid_entry_override(entry, CPUID_7_1_EDX); entry->ebx = 0; } if (max_idx >= 2) { entry = do_host_cpuid(array, function, 2); if (!entry) goto out; cpuid_entry_override(entry, CPUID_7_2_EDX); entry->ecx = 0; entry->ebx = 0; entry->eax = 0; } break; case 0xa: { /* Architectural Performance Monitoring */ union cpuid10_eax eax = { }; union cpuid10_edx edx = { }; if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } eax.split.version_id = kvm_pmu_cap.version; eax.split.num_counters = kvm_pmu_cap.num_counters_gp; eax.split.bit_width = kvm_pmu_cap.bit_width_gp; eax.split.mask_length = kvm_pmu_cap.events_mask_len; edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed; edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed; if (kvm_pmu_cap.version) edx.split.anythread_deprecated = 1; entry->eax = eax.full; entry->ebx = kvm_pmu_cap.events_mask; entry->ecx = 0; entry->edx = edx.full; break; } case 0x1f: case 0xb: /* * No topology; a valid topology is indicated by the presence * of subleaf 1. */ entry->eax = entry->ebx = entry->ecx = 0; break; case 0xd: { u64 permitted_xcr0 = kvm_get_filtered_xcr0(); u64 permitted_xss = kvm_caps.supported_xss; entry->eax &= permitted_xcr0; entry->ebx = xstate_required_size(permitted_xcr0, false); entry->ecx = entry->ebx; entry->edx &= permitted_xcr0 >> 32; if (!permitted_xcr0) break; entry = do_host_cpuid(array, function, 1); if (!entry) goto out; cpuid_entry_override(entry, CPUID_D_1_EAX); if (entry->eax & (feature_bit(XSAVES) | feature_bit(XSAVEC))) entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss, true); else { WARN_ON_ONCE(permitted_xss != 0); entry->ebx = 0; } entry->ecx &= permitted_xss; entry->edx &= permitted_xss >> 32; for (i = 2; i < 64; ++i) { bool s_state; if (permitted_xcr0 & BIT_ULL(i)) s_state = false; else if (permitted_xss & BIT_ULL(i)) s_state = true; else continue; entry = do_host_cpuid(array, function, i); if (!entry) goto out; /* * The supported check above should have filtered out * invalid sub-leafs. Only valid sub-leafs should * reach this point, and they should have a non-zero * save state size. 
Furthermore, check whether the * processor agrees with permitted_xcr0/permitted_xss * on whether this is an XCR0- or IA32_XSS-managed area. */ if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) { --array->nent; continue; } if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) entry->ecx &= ~BIT_ULL(2); entry->edx = 0; } break; } case 0x12: /* Intel SGX */ if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } /* * Index 0: Sub-features, MISCSELECT (a.k.a extended features) * and max enclave sizes. The SGX sub-features and MISCSELECT * are restricted by kernel and KVM capabilities (like most * feature flags), while enclave size is unrestricted. */ cpuid_entry_override(entry, CPUID_12_EAX); entry->ebx &= SGX_MISC_EXINFO; entry = do_host_cpuid(array, function, 1); if (!entry) goto out; /* * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la * feature flags. Advertise all supported flags, including * privileged attributes that require explicit opt-in from * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is * expected to derive it from supported XCR0. */ entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK; entry->ebx &= 0; break; /* Intel PT */ case 0x14: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { if (!do_host_cpuid(array, function, i)) goto out; } break; /* Intel AMX TILE */ case 0x1d: if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { if (!do_host_cpuid(array, function, i)) goto out; } break; case 0x1e: /* TMUL information */ if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } break; case 0x24: { u8 avx10_version; if (!kvm_cpu_cap_has(X86_FEATURE_AVX10)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } /* * The AVX10 version is encoded in EBX[7:0]. Note, the version * is guaranteed to be >=1 if AVX10 is supported. Note #2, the * version needs to be captured before overriding EBX features! */ avx10_version = min_t(u8, entry->ebx & 0xff, 1); cpuid_entry_override(entry, CPUID_24_0_EBX); entry->ebx |= avx10_version; entry->eax = 0; entry->ecx = 0; entry->edx = 0; break; } case KVM_CPUID_SIGNATURE: { const u32 *sigptr = (const u32 *)KVM_SIGNATURE; entry->eax = KVM_CPUID_FEATURES; entry->ebx = sigptr[0]; entry->ecx = sigptr[1]; entry->edx = sigptr[2]; break; } case KVM_CPUID_FEATURES: entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | (1 << KVM_FEATURE_NOP_IO_DELAY) | (1 << KVM_FEATURE_CLOCKSOURCE2) | (1 << KVM_FEATURE_ASYNC_PF) | (1 << KVM_FEATURE_PV_EOI) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | (1 << KVM_FEATURE_PV_UNHALT) | (1 << KVM_FEATURE_PV_TLB_FLUSH) | (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) | (1 << KVM_FEATURE_PV_SEND_IPI) | (1 << KVM_FEATURE_POLL_CONTROL) | (1 << KVM_FEATURE_PV_SCHED_YIELD) | (1 << KVM_FEATURE_ASYNC_PF_INT); if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); entry->ebx = 0; entry->ecx = 0; entry->edx = 0; break; case 0x80000000: entry->eax = min(entry->eax, 0x80000022); /* * Serializing LFENCE is reported in a multitude of ways, and * NullSegClearsBase is not reported in CPUID on Zen2; help * userspace by providing the CPUID leaf ourselves. * * However, only do it if the host has CPUID leaf 0x8000001d. 
* QEMU thinks that it can query the host blindly for that * CPUID leaf if KVM reports that it supports 0x8000001d or * above. The processor merrily returns values from the * highest Intel leaf which QEMU tries to use as the guest's * 0x8000001d. Even worse, this can result in an infinite * loop if said highest leaf has no subleaves indexed by ECX. */ if (entry->eax >= 0x8000001d && (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) || !static_cpu_has_bug(X86_BUG_NULL_SEG))) entry->eax = max(entry->eax, 0x80000021); break; case 0x80000001: entry->ebx &= ~GENMASK(27, 16); cpuid_entry_override(entry, CPUID_8000_0001_EDX); cpuid_entry_override(entry, CPUID_8000_0001_ECX); break; case 0x80000005: /* Pass host L1 cache and TLB info. */ break; case 0x80000006: /* Drop reserved bits, pass host L2 cache and TLB info. */ entry->edx &= ~GENMASK(17, 16); break; case 0x80000007: /* Advanced power management */ cpuid_entry_override(entry, CPUID_8000_0007_EDX); /* mask against host */ entry->edx &= boot_cpu_data.x86_power; entry->eax = entry->ebx = entry->ecx = 0; break; case 0x80000008: { /* * GuestPhysAddrSize (EAX[23:16]) is intended for software * use. * * KVM's ABI is to report the effective MAXPHYADDR for the * guest in PhysAddrSize (phys_as), and the maximum * *addressable* GPA in GuestPhysAddrSize (g_phys_as). * * GuestPhysAddrSize is valid if and only if TDP is enabled, * in which case the max GPA that can be addressed by KVM may * be less than the max GPA that can be legally generated by * the guest, e.g. if MAXPHYADDR>48 but the CPU doesn't * support 5-level TDP. */ unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U); unsigned int phys_as, g_phys_as; /* * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as * the guest operates in the same PA space as the host, i.e. * reductions in MAXPHYADDR for memory encryption affect shadow * paging, too. * * If TDP is enabled, use the raw bare metal MAXPHYADDR as * reductions to the HPAs do not affect GPAs. The max * addressable GPA is the same as the max effective GPA, except * that it's capped at 48 bits if 5-level TDP isn't supported * (hardware processes bits 51:48 only when walking the fifth * level page table). */ if (!tdp_enabled) { phys_as = boot_cpu_data.x86_phys_bits; g_phys_as = 0; } else { phys_as = entry->eax & 0xff; g_phys_as = phys_as; if (kvm_mmu_get_max_tdp_level() < 5) g_phys_as = min(g_phys_as, 48U); } entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16); entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0008_EBX); break; } case 0x8000000A: if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } entry->eax = 1; /* SVM revision 1 */ entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper ASID emulation to nested SVM */ entry->ecx = 0; /* Reserved */ cpuid_entry_override(entry, CPUID_8000_000A_EDX); break; case 0x80000019: entry->ecx = entry->edx = 0; break; case 0x8000001a: entry->eax &= GENMASK(2, 0); entry->ebx = entry->ecx = entry->edx = 0; break; case 0x8000001e: /* Do not return host topology information. */ entry->eax = entry->ebx = entry->ecx = 0; entry->edx = 0; /* reserved */ break; case 0x8000001F: if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; } else { cpuid_entry_override(entry, CPUID_8000_001F_EAX); /* Clear NumVMPL since KVM does not support VMPL. 
 */ entry->ebx &= ~GENMASK(31, 12); /* * Enumerate '0' for "PA bits reduction"; the adjusted * MAXPHYADDR is enumerated directly (see 0x80000008). */ entry->ebx &= ~GENMASK(11, 6); } break; case 0x80000020: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; case 0x80000021: entry->ebx = entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0021_EAX); cpuid_entry_override(entry, CPUID_8000_0021_ECX); break; /* AMD Extended Performance Monitoring and Debug */ case 0x80000022: { union cpuid_0x80000022_ebx ebx = { }; entry->ecx = entry->edx = 0; if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) { entry->eax = entry->ebx = 0; break; } cpuid_entry_override(entry, CPUID_8000_0022_EAX); ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp; entry->ebx = ebx.full; break; } /* Add support for Centaur's CPUID instruction */ case 0xC0000000: /* Just support up to 0xC0000004 now */ entry->eax = min(entry->eax, 0xC0000004); break; case 0xC0000001: cpuid_entry_override(entry, CPUID_C000_0001_EDX); break; case 3: /* Processor serial number */ case 5: /* MONITOR/MWAIT */ case 0xC0000002: case 0xC0000003: case 0xC0000004: default: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } r = 0; out: put_cpu(); return r; } static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func, unsigned int type) { if (type == KVM_GET_EMULATED_CPUID) return __do_cpuid_func_emulated(array, func); return __do_cpuid_func(array, func); } #define CENTAUR_CPUID_SIGNATURE 0xC0000000 static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func, unsigned int type) { u32 limit; int r; if (func == CENTAUR_CPUID_SIGNATURE && boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 0; r = do_cpuid_func(array, func, type); if (r) return r; limit = array->entries[array->nent - 1].eax; for (func = func + 1; func <= limit; ++func) { r = do_cpuid_func(array, func, type); if (r) break; } return r; } static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries, __u32 num_entries, unsigned int ioctl_type) { int i; __u32 pad[3]; if (ioctl_type != KVM_GET_EMULATED_CPUID) return false; /* * We want to make sure that ->padding is being passed clean from * userspace in case we want to use it for something in the future. * * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we * have to settle for checking only the emulated side. /me * sheds a tear.
*/ for (i = 0; i < num_entries; i++) { if (copy_from_user(pad, entries[i].padding, sizeof(pad))) return true; if (pad[0] || pad[1] || pad[2]) return true; } return false; } int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries, unsigned int type) { static const u32 funcs[] = { 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE, }; struct kvm_cpuid_array array = { .nent = 0, }; int r, i; if (cpuid->nent < 1) return -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) cpuid->nent = KVM_MAX_CPUID_ENTRIES; if (sanity_check_entries(entries, cpuid->nent, type)) return -EINVAL; array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL); if (!array.entries) return -ENOMEM; array.maxnent = cpuid->nent; for (i = 0; i < ARRAY_SIZE(funcs); i++) { r = get_cpuid_func(&array, funcs[i], type); if (r) goto out_free; } cpuid->nent = array.nent; if (copy_to_user(entries, array.entries, array.nent * sizeof(struct kvm_cpuid_entry2))) r = -EFAULT; out_free: kvfree(array.entries); return r; } /* * Intel CPUID semantics treats any query for an out-of-range leaf as if the * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics * returns all zeroes for any undefined leaf, whether or not the leaf is in * range. Centaur/VIA follows Intel semantics. * * A leaf is considered out-of-range if its function is higher than the maximum * supported leaf of its associated class or if its associated class does not * exist. * * There are three primary classes to be considered, with their respective * ranges described as "<base> - <top>[,<base2> - <top2>] inclusive. A primary * class exists if a guest CPUID entry for its <base> leaf exists. For a given * class, CPUID.<base>.EAX contains the max supported leaf for the class. * * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff * - Hypervisor: 0x40000000 - 0x4fffffff * - Extended: 0x80000000 - 0xbfffffff * - Centaur: 0xc0000000 - 0xcfffffff * * The Hypervisor class is further subdivided into sub-classes that each act as * their own independent class associated with a 0x100 byte range. E.g. if Qemu * is advertising support for both HyperV and KVM, the resulting Hypervisor * CPUID sub-classes are: * * - HyperV: 0x40000000 - 0x400000ff * - KVM: 0x40000100 - 0x400001ff */ static struct kvm_cpuid_entry2 * get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index) { struct kvm_cpuid_entry2 *basic, *class; u32 function = *fn_ptr; basic = kvm_find_cpuid_entry(vcpu, 0); if (!basic) return NULL; if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) || is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx)) return NULL; if (function >= 0x40000000 && function <= 0x4fffffff) class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00); else if (function >= 0xc0000000) class = kvm_find_cpuid_entry(vcpu, 0xc0000000); else class = kvm_find_cpuid_entry(vcpu, function & 0x80000000); if (class && function <= class->eax) return NULL; /* * Leaf specific adjustments are also applied when redirecting to the * max basic entry, e.g. if the max basic leaf is 0xb but there is no * entry for CPUID.0xb.index (see below), then the output value for EDX * needs to be pulled from CPUID.0xb.1. */ *fn_ptr = basic->eax; /* * The class does not exist or the requested function is out of range; * the effective CPUID entry is the max basic leaf. Note, the index of * the original requested leaf is observed! 
*/ return kvm_find_cpuid_entry_index(vcpu, basic->eax, index); } bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool exact_only) { u32 orig_function = *eax, function = *eax, index = *ecx; struct kvm_cpuid_entry2 *entry; bool exact, used_max_basic = false; if (vcpu->arch.cpuid_dynamic_bits_dirty) kvm_update_cpuid_runtime(vcpu); entry = kvm_find_cpuid_entry_index(vcpu, function, index); exact = !!entry; if (!entry && !exact_only) { entry = get_out_of_range_cpuid_entry(vcpu, &function, index); used_max_basic = !!entry; } if (entry) { *eax = entry->eax; *ebx = entry->ebx; *ecx = entry->ecx; *edx = entry->edx; if (function == 7 && index == 0) { u64 data; if ((*ebx & (feature_bit(RTM) | feature_bit(HLE))) && !kvm_msr_read(vcpu, MSR_IA32_TSX_CTRL, &data) && (data & TSX_CTRL_CPUID_CLEAR)) *ebx &= ~(feature_bit(RTM) | feature_bit(HLE)); } else if (function == 0x80000007) { if (kvm_hv_invtsc_suppressed(vcpu)) *edx &= ~feature_bit(CONSTANT_TSC); } else if (IS_ENABLED(CONFIG_KVM_XEN) && kvm_xen_is_tsc_leaf(vcpu, function)) { /* * Update guest TSC frequency information if necessary. * Ignore failures, there is no sane value that can be * provided if KVM can't get the TSC frequency. */ if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) kvm_guest_time_update(vcpu); if (index == 1) { *ecx = vcpu->arch.pvclock_tsc_mul; *edx = vcpu->arch.pvclock_tsc_shift; } else if (index == 2) { *eax = vcpu->arch.hw_tsc_khz; } } } else { *eax = *ebx = *ecx = *edx = 0; /* * When leaf 0BH or 1FH is defined, CL is pass-through * and EDX is always the x2APIC ID, even for undefined * subleaves. Index 1 will exist iff the leaf is * implemented, so we pass through CL iff leaf 1 * exists. EDX can be copied from any existing index. */ if (function == 0xb || function == 0x1f) { entry = kvm_find_cpuid_entry_index(vcpu, function, 1); if (entry) { *ecx = index & 0xff; *edx = entry->edx; } } } trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact, used_max_basic); return exact; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpuid); int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) { u32 eax, ebx, ecx, edx; if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0)) return 1; eax = kvm_rax_read(vcpu); ecx = kvm_rcx_read(vcpu); kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false); kvm_rax_write(vcpu, eax); kvm_rbx_write(vcpu, ebx); kvm_rcx_write(vcpu, ecx); kvm_rdx_write(vcpu, edx); return kvm_skip_emulated_instruction(vcpu); } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_cpuid);
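kvm_dev_ioctl_get_cpuid() above backs the KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID system ioctls. As a rough sketch of the userspace side of that ABI (not part of this file; the entry count of 128 is an arbitrary assumption, and the kernel caps requests at KVM_MAX_CPUID_ENTRIES), the following queries /dev/kvm and dumps the supported leaves:

/* Illustrative userspace sketch, not part of cpuid.c. */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
	int i, kvm = open("/dev/kvm", O_RDWR);
	/* 128 entries is a guess; the ioctl fails with E2BIG if too small. */
	int nent = 128;
	struct kvm_cpuid2 *cpuid = calloc(1, sizeof(*cpuid) +
					  nent * sizeof(struct kvm_cpuid_entry2));

	if (kvm < 0 || !cpuid)
		return 1;

	cpuid->nent = nent;
	if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
		perror("KVM_GET_SUPPORTED_CPUID");
		return 1;
	}

	/* On success, nent is updated to the number of entries filled in. */
	for (i = 0; i < cpuid->nent; i++)
		printf("func 0x%08x idx %u: %08x %08x %08x %08x\n",
		       cpuid->entries[i].function, cpuid->entries[i].index,
		       cpuid->entries[i].eax, cpuid->entries[i].ebx,
		       cpuid->entries[i].ecx, cpuid->entries[i].edx);
	return 0;
}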
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HID driver for some ITE "special" devices
 * Copyright (c) 2017 Hans de Goede <hdegoede@redhat.com>
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

#define QUIRK_TOUCHPAD_ON_OFF_REPORT		BIT(0)

static const __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				    unsigned int *rsize)
{
	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);

	if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
		/* For Acer Aspire Switch 10 SW5-012 keyboard-dock */
		if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
			hid_info(hdev,
				 "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n");
			rdesc[163] = HID_MAIN_ITEM_RELATIVE;
		}
		/* For Acer One S1002/S1003 keyboard-dock */
		if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) {
			hid_info(hdev,
				 "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n");
			rdesc[186] = HID_MAIN_ITEM_RELATIVE;
		}
		/* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */
		if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) {
			hid_info(hdev,
				 "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n");
			rdesc[185] = HID_MAIN_ITEM_RELATIVE;
		}
	}

	return rdesc;
}

static int ite_input_mapping(struct hid_device *hdev,
			     struct hid_input *hi, struct hid_field *field,
			     struct hid_usage *usage, unsigned long **bit,
			     int *max)
{
	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);

	if ((quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) &&
	    (usage->hid & HID_USAGE_PAGE) == 0x00880000) {
		if (usage->hid == 0x00880078) {
			/* Touchpad on, userspace expects F22 for this */
			hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F22);
			return 1;
		}
		if (usage->hid == 0x00880079) {
			/* Touchpad off, userspace expects F23 for this */
			hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F23);
			return 1;
		}
		return -1;
	}

	return 0;
}

static int ite_event(struct hid_device *hdev, struct hid_field *field,
		     struct hid_usage *usage, __s32 value)
{
	struct input_dev *input;

	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
		return 0;

	input = field->hidinput->input;

	/*
	 * The ITE8595 always reports 0 as value for the rfkill button. Luckily
	 * it is the only button in its report, and it sends a report on
	 * release only, so receiving a report means the button was pressed.
	 */
	if (usage->hid == HID_GD_RFKILL_BTN) {
		input_event(input, EV_KEY, KEY_RFKILL, 1);
		input_sync(input);
		input_event(input, EV_KEY, KEY_RFKILL, 0);
		input_sync(input);
		return 1;
	}

	return 0;
}

static int ite_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	hid_set_drvdata(hdev, (void *)id->driver_data);

	ret = hid_open_report(hdev);
	if (ret)
		return ret;

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static const struct hid_device_id ite_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
	/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
		     USB_VENDOR_ID_SYNAPTICS,
		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012),
	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
	/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
		     USB_VENDOR_ID_SYNAPTICS,
		     USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002),
	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
	/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
		     USB_VENDOR_ID_SYNAPTICS,
		     USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
	/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
		     USB_VENDOR_ID_SYNAPTICS,
		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017),
	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
	{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);

static struct hid_driver ite_driver = {
	.name = "itetech",
	.id_table = ite_devices,
	.probe = ite_probe,
	.report_fixup = ite_report_fixup,
	.input_mapping = ite_input_mapping,
	.event = ite_event,
};
module_hid_driver(ite_driver);

MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("HID driver for some ITE \"special\" devices");
MODULE_LICENSE("GPL");
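The vendor-usage check in ite_input_mapping() relies on the kernel packing the HID usage page into the high 16 bits of usage->hid (HID_USAGE_PAGE is the 0xffff0000 mask). A minimal standalone sketch of that decoding, using the two vendor usages handled by the driver above:

/* Standalone illustration, not part of the driver. */
#include <stdint.h>
#include <stdio.h>

static void split_hid_usage(uint32_t hid)
{
	uint16_t page = hid >> 16;     /* 0x0088: vendor-defined usage page */
	uint16_t usage = hid & 0xffff; /* usage ID within that page */

	printf("page 0x%04x, usage 0x%04x\n", page, usage);
}

int main(void)
{
	split_hid_usage(0x00880078); /* touchpad on,  mapped to KEY_F22 above */
	split_hid_usage(0x00880079); /* touchpad off, mapped to KEY_F23 above */
	return 0;
}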