/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the ICMP protocol.
 *
 * Version:	@(#)icmp.h	1.0.3	04/28/93
 *
 * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _LINUX_ICMP_H
#define _LINUX_ICMP_H

#include <linux/skbuff.h>
#include <uapi/linux/icmp.h>
#include <uapi/linux/errqueue.h>

static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
	return (struct icmphdr *)skb_transport_header(skb);
}

static inline bool icmp_is_err(int type)
{
	switch (type) {
	case ICMP_DEST_UNREACH:
	case ICMP_SOURCE_QUENCH:
	case ICMP_REDIRECT:
	case ICMP_TIME_EXCEEDED:
	case ICMP_PARAMETERPROB:
		return true;
	}

	return false;
}

void ip_icmp_error_rfc4884(const struct sk_buff *skb,
			   struct sock_ee_data_rfc4884 *out,
			   int thlen, int off);

#endif /* _LINUX_ICMP_H */
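/*
 * Illustrative sketch, not part of icmp.h above: one way a caller might
 * combine icmp_hdr() and icmp_is_err() when inspecting a received ICMP
 * packet. The helper name skb_icmp_is_err() is invented for this example
 * and assumes skb_transport_header() already points at the ICMP header.
 */
#include <linux/icmp.h>
#include <linux/skbuff.h>

static bool skb_icmp_is_err(const struct sk_buff *skb)
{
	const struct icmphdr *ih = icmp_hdr(skb);

	/* True for ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, etc. */
	return icmp_is_err(ih->type);
}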
| 4 4 4 4 4 4 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 | // SPDX-License-Identifier: GPL-2.0-or-later /* * uvc_metadata.c -- USB Video Class driver - Metadata handling * * Copyright (C) 2016 * Guennadi Liakhovetski (guennadi.liakhovetski@intel.com) */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/uvc.h> #include <linux/videodev2.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-vmalloc.h> #include "uvcvideo.h" /* ----------------------------------------------------------------------------- * V4L2 ioctls */ static int uvc_meta_v4l2_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct v4l2_fh *vfh = file_to_v4l2_fh(file); struct uvc_streaming *stream = video_get_drvdata(vfh->vdev); struct uvc_video_chain *chain = stream->chain; strscpy(cap->driver, "uvcvideo", sizeof(cap->driver)); strscpy(cap->card, stream->dev->name, sizeof(cap->card)); usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info)); cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING | chain->caps; return 0; } static int uvc_meta_v4l2_get_format(struct file *file, void *priv, struct v4l2_format *format) { struct v4l2_fh *vfh = file_to_v4l2_fh(file); struct uvc_streaming *stream = video_get_drvdata(vfh->vdev); struct v4l2_meta_format *fmt = &format->fmt.meta; if (format->type != vfh->vdev->queue->type) return -EINVAL; memset(fmt, 0, sizeof(*fmt)); fmt->dataformat = stream->meta.format; fmt->buffersize = UVC_METADATA_BUF_SIZE; return 0; } static int uvc_meta_v4l2_try_format(struct file *file, void *priv, struct v4l2_format *format) { struct v4l2_fh *vfh = file_to_v4l2_fh(file); struct uvc_streaming *stream = video_get_drvdata(vfh->vdev); struct uvc_device *dev = stream->dev; struct v4l2_meta_format *fmt = &format->fmt.meta; u32 fmeta = V4L2_META_FMT_UVC; if (format->type != vfh->vdev->queue->type) return -EINVAL; for (unsigned int i = 0; i < dev->nmeta_formats; i++) if (dev->meta_formats[i] == fmt->dataformat) { fmeta = fmt->dataformat; break; } memset(fmt, 0, sizeof(*fmt)); fmt->dataformat = fmeta; fmt->buffersize = UVC_METADATA_BUF_SIZE; return 0; } static int uvc_meta_v4l2_set_format(struct file *file, void *priv, struct v4l2_format *format) { struct v4l2_fh *vfh = file_to_v4l2_fh(file); struct uvc_streaming *stream = video_get_drvdata(vfh->vdev); struct v4l2_meta_format *fmt = &format->fmt.meta; int ret; ret = uvc_meta_v4l2_try_format(file, priv, format); if (ret < 0) return ret; /* * We could in principle switch at any time, also during streaming. 
* Metadata buffers would still be perfectly parseable, but it's more * consistent and cleaner to disallow that. */ mutex_lock(&stream->mutex); if (vb2_is_busy(&stream->meta.queue.queue)) ret = -EBUSY; else stream->meta.format = fmt->dataformat; mutex_unlock(&stream->mutex); return ret; } static int uvc_meta_v4l2_enum_formats(struct file *file, void *priv, struct v4l2_fmtdesc *fdesc) { struct v4l2_fh *vfh = file_to_v4l2_fh(file); struct uvc_streaming *stream = video_get_drvdata(vfh->vdev); struct uvc_device *dev = stream->dev; u32 i = fdesc->index; if (fdesc->type != vfh->vdev->queue->type) return -EINVAL; if (i >= dev->nmeta_formats) return -EINVAL; memset(fdesc, 0, sizeof(*fdesc)); fdesc->type = vfh->vdev->queue->type; fdesc->index = i; fdesc->pixelformat = dev->meta_formats[i]; return 0; } static const struct v4l2_ioctl_ops uvc_meta_ioctl_ops = { .vidioc_querycap = uvc_meta_v4l2_querycap, .vidioc_g_fmt_meta_cap = uvc_meta_v4l2_get_format, .vidioc_s_fmt_meta_cap = uvc_meta_v4l2_set_format, .vidioc_try_fmt_meta_cap = uvc_meta_v4l2_try_format, .vidioc_enum_fmt_meta_cap = uvc_meta_v4l2_enum_formats, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, }; /* ----------------------------------------------------------------------------- * V4L2 File Operations */ static const struct v4l2_file_operations uvc_meta_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .open = v4l2_fh_open, .release = vb2_fop_release, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, }; static struct uvc_entity *uvc_meta_find_msxu(struct uvc_device *dev) { static const u8 uvc_msxu_guid[16] = UVC_GUID_MSXU_1_5; struct uvc_entity *entity; list_for_each_entry(entity, &dev->entities, list) { if (!memcmp(entity->guid, uvc_msxu_guid, sizeof(entity->guid))) return entity; } return NULL; } #define MSXU_CONTROL_METADATA 0x9 static int uvc_meta_detect_msxu(struct uvc_device *dev) { u32 *data __free(kfree) = NULL; struct uvc_entity *entity; int ret; entity = uvc_meta_find_msxu(dev); if (!entity) return 0; /* * USB requires buffers aligned in a special way, simplest way is to * make sure that query_ctrl will work is to kmalloc() them. */ data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; /* Check if the metadata is already enabled. */ ret = uvc_query_ctrl(dev, UVC_GET_CUR, entity->id, dev->intfnum, MSXU_CONTROL_METADATA, data, sizeof(*data)); if (ret) return 0; if (*data) { dev->quirks |= UVC_QUIRK_MSXU_META; return 0; } /* * We have seen devices that require 1 to enable the metadata, others * requiring a value != 1 and others requiring a value >1. Luckily for * us, the value from GET_MAX seems to work all the time. */ ret = uvc_query_ctrl(dev, UVC_GET_MAX, entity->id, dev->intfnum, MSXU_CONTROL_METADATA, data, sizeof(*data)); if (ret || !*data) return 0; /* * If we can set MSXU_CONTROL_METADATA, the device will report * metadata. 
*/ ret = uvc_query_ctrl(dev, UVC_SET_CUR, entity->id, dev->intfnum, MSXU_CONTROL_METADATA, data, sizeof(*data)); if (!ret) dev->quirks |= UVC_QUIRK_MSXU_META; return 0; } int uvc_meta_register(struct uvc_streaming *stream) { struct uvc_device *dev = stream->dev; struct video_device *vdev = &stream->meta.vdev; struct uvc_video_queue *queue = &stream->meta.queue; stream->meta.format = V4L2_META_FMT_UVC; return uvc_register_video_device(dev, stream, vdev, queue, V4L2_BUF_TYPE_META_CAPTURE, &uvc_meta_fops, &uvc_meta_ioctl_ops); } int uvc_meta_init(struct uvc_device *dev) { unsigned int i = 0; int ret; ret = uvc_meta_detect_msxu(dev); if (ret) return ret; dev->meta_formats[i++] = V4L2_META_FMT_UVC; if (dev->info->meta_format && !WARN_ON(dev->info->meta_format == V4L2_META_FMT_UVC)) dev->meta_formats[i++] = dev->info->meta_format; if (dev->quirks & UVC_QUIRK_MSXU_META && !WARN_ON(dev->info->meta_format == V4L2_META_FMT_UVC_MSXU_1_5)) dev->meta_formats[i++] = V4L2_META_FMT_UVC_MSXU_1_5; /* IMPORTANT: for new meta-formats update UVC_MAX_META_DATA_FORMATS. */ dev->nmeta_formats = i; return 0; } |
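/*
 * Illustrative user-space sketch, not part of the driver above: how an
 * application could enumerate and select a format on the UVC metadata
 * node exposed by this file. The device path argument is an assumption;
 * the ioctls and structures are the standard V4L2 metadata-capture API.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int select_uvc_meta_format(const char *devnode)
{
	struct v4l2_fmtdesc fdesc;
	struct v4l2_format fmt;
	int ret, fd = open(devnode, O_RDWR);

	if (fd < 0)
		return -1;

	/* List the metadata formats registered by uvc_meta_init(). */
	memset(&fdesc, 0, sizeof(fdesc));
	fdesc.type = V4L2_BUF_TYPE_META_CAPTURE;
	while (ioctl(fd, VIDIOC_ENUM_FMT, &fdesc) == 0) {
		printf("meta format %u: 0x%08x\n", fdesc.index, fdesc.pixelformat);
		fdesc.index++;
	}

	/* Ask for the standard UVC metadata format. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
	fmt.fmt.meta.dataformat = V4L2_META_FMT_UVC;
	ret = ioctl(fd, VIDIOC_S_FMT, &fmt);

	close(fd);
	return ret;
}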
// SPDX-License-Identifier: GPL-2.0-only
/* DVB USB compliant linux driver for Technotrend DVB USB boxes and clones
 * (e.g. Pinnacle 400e DVB-S USB2.0).
 *
 * The Pinnacle 400e uses the same protocol as the Technotrend USB1.1 boxes.
* * TDA8263 + TDA10086 * * I2C addresses: * 0x08 - LNBP21PD - LNB power supply * 0x0e - TDA10086 - Demodulator * 0x50 - FX2 eeprom * 0x60 - TDA8263 - Tuner * 0x78 ??? * * Copyright (c) 2002 Holger Waechtler <holger@convergence.de> * Copyright (c) 2003 Felix Domke <tmbinc@elitedvb.net> * Copyright (C) 2005-6 Patrick Boettcher <pb@linuxtv.org> * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #define DVB_USB_LOG_PREFIX "ttusb2" #include "dvb-usb.h" #include "ttusb2.h" #include "tda826x.h" #include "tda10086.h" #include "tda1002x.h" #include "tda10048.h" #include "tda827x.h" #include "lnbp21.h" /* CA */ #include <media/dvb_ca_en50221.h> /* debug */ static int dvb_usb_ttusb2_debug; #define deb_info(args...) dprintk(dvb_usb_ttusb2_debug,0x01,args) module_param_named(debug,dvb_usb_ttusb2_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))." DVB_USB_DEBUG_STATUS); static int dvb_usb_ttusb2_debug_ci; module_param_named(debug_ci,dvb_usb_ttusb2_debug_ci, int, 0644); MODULE_PARM_DESC(debug_ci, "set debugging ci." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define ci_dbg(format, arg...) \ do { \ if (dvb_usb_ttusb2_debug_ci) \ printk(KERN_DEBUG DVB_USB_LOG_PREFIX \ ": %s " format "\n" , __func__, ## arg); \ } while (0) enum { TT3650_CMD_CI_TEST = 0x40, TT3650_CMD_CI_RD_CTRL, TT3650_CMD_CI_WR_CTRL, TT3650_CMD_CI_RD_ATTR, TT3650_CMD_CI_WR_ATTR, TT3650_CMD_CI_RESET, TT3650_CMD_CI_SET_VIDEO_PORT }; struct ttusb2_state { struct dvb_ca_en50221 ca; struct mutex ca_mutex; u8 id; u16 last_rc_key; }; static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) { struct ttusb2_state *st = d->priv; u8 *s, *r = NULL; int ret = 0; if (4 + rlen > 64) return -EIO; s = kzalloc(wlen+4, GFP_KERNEL); if (!s) return -ENOMEM; r = kzalloc(64, GFP_KERNEL); if (!r) { kfree(s); return -ENOMEM; } s[0] = 0xaa; s[1] = ++st->id; s[2] = cmd; s[3] = wlen; memcpy(&s[4],wbuf,wlen); ret = dvb_usb_generic_rw(d, s, wlen+4, r, 64, 0); if (ret != 0 || r[0] != 0x55 || r[1] != s[1] || r[2] != cmd || (rlen > 0 && r[3] != rlen)) { warn("there might have been an error during control message transfer. 
(rlen = %d, was %d)",rlen,r[3]); kfree(s); kfree(r); return -EIO; } if (rlen > 0) memcpy(rbuf, &r[4], rlen); kfree(s); kfree(r); return 0; } /* ci */ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len) { int ret; u8 rx[60];/* (64 -4) */ ret = ttusb2_msg(d, cmd, data, write_len, rx, read_len); if (!ret) memcpy(data, rx, read_len); return ret; } static int tt3650_ci_msg_locked(struct dvb_ca_en50221 *ca, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len) { struct dvb_usb_device *d = ca->data; struct ttusb2_state *state = d->priv; int ret; mutex_lock(&state->ca_mutex); ret = tt3650_ci_msg(d, cmd, data, write_len, read_len); mutex_unlock(&state->ca_mutex); return ret; } static int tt3650_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) { u8 buf[3]; int ret = 0; if (slot) return -EINVAL; buf[0] = (address >> 8) & 0x0F; buf[1] = address; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_ATTR, buf, 2, 3); ci_dbg("%04x -> %d 0x%02x", address, ret, buf[2]); if (ret < 0) return ret; return buf[2]; } static int tt3650_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) { u8 buf[3]; ci_dbg("%d 0x%04x 0x%02x", slot, address, value); if (slot) return -EINVAL; buf[0] = (address >> 8) & 0x0F; buf[1] = address; buf[2] = value; return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_ATTR, buf, 3, 3); } static int tt3650_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) { u8 buf[2]; int ret; if (slot) return -EINVAL; buf[0] = address & 3; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_CTRL, buf, 1, 2); ci_dbg("0x%02x -> %d 0x%02x", address, ret, buf[1]); if (ret < 0) return ret; return buf[1]; } static int tt3650_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) { u8 buf[2]; ci_dbg("%d 0x%02x 0x%02x", slot, address, value); if (slot) return -EINVAL; buf[0] = address; buf[1] = value; return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_CTRL, buf, 2, 2); } static int tt3650_ci_set_video_port(struct dvb_ca_en50221 *ca, int slot, int enable) { u8 buf[1]; int ret; ci_dbg("%d %d", slot, enable); if (slot) return -EINVAL; buf[0] = enable; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1); if (ret < 0) return ret; if (enable != buf[0]) { err("CI not %sabled.", enable ? 
"en" : "dis"); return -EIO; } return 0; } static int tt3650_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) { return tt3650_ci_set_video_port(ca, slot, 0); } static int tt3650_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) { return tt3650_ci_set_video_port(ca, slot, 1); } static int tt3650_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = ca->data; struct ttusb2_state *state = d->priv; u8 buf[1]; int ret; ci_dbg("%d", slot); if (slot) return -EINVAL; buf[0] = 0; mutex_lock(&state->ca_mutex); ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1); if (ret) goto failed; msleep(500); buf[0] = 1; ret = tt3650_ci_msg(d, TT3650_CMD_CI_RESET, buf, 1, 1); if (ret) goto failed; msleep(500); buf[0] = 0; /* FTA */ ret = tt3650_ci_msg(d, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1); msleep(1100); failed: mutex_unlock(&state->ca_mutex); return ret; } static int tt3650_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) { u8 buf[1]; int ret; if (slot) return -EINVAL; ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_TEST, buf, 0, 1); if (ret) return ret; if (1 == buf[0]) { return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; } return 0; } static void tt3650_ci_uninit(struct dvb_usb_device *d) { struct ttusb2_state *state; ci_dbg(""); if (NULL == d) return; state = d->priv; if (NULL == state) return; if (NULL == state->ca.data) return; dvb_ca_en50221_release(&state->ca); memset(&state->ca, 0, sizeof(state->ca)); } static int tt3650_ci_init(struct dvb_usb_adapter *a) { struct dvb_usb_device *d = a->dev; struct ttusb2_state *state = d->priv; int ret; ci_dbg(""); mutex_init(&state->ca_mutex); state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = tt3650_ci_read_attribute_mem; state->ca.write_attribute_mem = tt3650_ci_write_attribute_mem; state->ca.read_cam_control = tt3650_ci_read_cam_control; state->ca.write_cam_control = tt3650_ci_write_cam_control; state->ca.slot_reset = tt3650_ci_slot_reset; state->ca.slot_shutdown = tt3650_ci_slot_shutdown; state->ca.slot_ts_enable = tt3650_ci_slot_ts_enable; state->ca.poll_slot_status = tt3650_ci_poll_slot_status; state->ca.data = d; ret = dvb_ca_en50221_init(&a->dvb_adap, &state->ca, /* flags */ 0, /* n_slots */ 1); if (ret) { err("Cannot initialize CI: Error %d.", ret); memset(&state->ca, 0, sizeof(state->ca)); return ret; } info("CI initialized."); return 0; } static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); static u8 obuf[60], ibuf[60]; int i, write_read, read; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; if (num > 2) warn("more than 2 i2c messages at a time is not handled yet. 
TODO."); for (i = 0; i < num; i++) { write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD); read = msg[i].flags & I2C_M_RD; if (3 + msg[i].len > sizeof(obuf)) { err("i2c wr len=%d too high", msg[i].len); break; } if (write_read) { if (3 + msg[i+1].len > sizeof(ibuf)) { err("i2c rd len=%d too high", msg[i+1].len); break; } } else if (read) { if (3 + msg[i].len > sizeof(ibuf)) { err("i2c rd len=%d too high", msg[i].len); break; } } obuf[0] = (msg[i].addr << 1) | (write_read | read); if (read) obuf[1] = 0; else obuf[1] = msg[i].len; /* read request */ if (write_read) obuf[2] = msg[i+1].len; else if (read) obuf[2] = msg[i].len; else obuf[2] = 0; memcpy(&obuf[3], msg[i].buf, msg[i].len); if (ttusb2_msg(d, CMD_I2C_XFER, obuf, obuf[1]+3, ibuf, obuf[2] + 3) < 0) { err("i2c transfer failed."); break; } if (write_read) { memcpy(msg[i+1].buf, &ibuf[3], msg[i+1].len); i++; } else if (read) memcpy(msg[i].buf, &ibuf[3], msg[i].len); } mutex_unlock(&d->i2c_mutex); return i; } static u32 ttusb2_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static const struct i2c_algorithm ttusb2_i2c_algo = { .master_xfer = ttusb2_i2c_xfer, .functionality = ttusb2_i2c_func, }; /* command to poll IR receiver (copied from pctv452e.c) */ #define CMD_GET_IR_CODE 0x1b /* IR */ static int tt3650_rc_query(struct dvb_usb_device *d) { int ret; u8 rx[9]; /* A CMD_GET_IR_CODE reply is 9 bytes long */ struct ttusb2_state *st = d->priv; ret = ttusb2_msg(d, CMD_GET_IR_CODE, NULL, 0, rx, sizeof(rx)); if (ret != 0) return ret; if (rx[8] & 0x01) { /* got a "press" event */ st->last_rc_key = RC_SCANCODE_RC5(rx[3], rx[2]); deb_info("%s: cmd=0x%02x sys=0x%02x\n", __func__, rx[2], rx[3]); rc_keydown(d->rc_dev, RC_PROTO_RC5, st->last_rc_key, rx[1]); } else if (st->last_rc_key) { rc_keyup(d->rc_dev); st->last_rc_key = 0; } return 0; } /* Callbacks for DVB USB */ static int ttusb2_identify_state(struct usb_device *udev, const struct dvb_usb_device_properties *props, const struct dvb_usb_device_description **desc, int *cold) { *cold = udev->descriptor.iManufacturer == 0 && udev->descriptor.iProduct == 0; return 0; } static int ttusb2_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 b = onoff; ttusb2_msg(d, CMD_POWER, &b, 0, NULL, 0); return ttusb2_msg(d, CMD_POWER, &b, 1, NULL, 0); } static struct tda10086_config tda10086_config = { .demod_address = 0x0e, .invert = 0, .diseqc_tone = 1, .xtal_freq = TDA10086_XTAL_16M, }; static struct tda10023_config tda10023_config = { .demod_address = 0x0c, .invert = 0, .xtal = 16000000, .pll_m = 11, .pll_p = 3, .pll_n = 1, .deltaf = 0xa511, }; static struct tda10048_config tda10048_config = { .demod_address = 0x10 >> 1, .output_mode = TDA10048_PARALLEL_OUTPUT, .inversion = TDA10048_INVERSION_ON, .dtv6_if_freq_khz = TDA10048_IF_4000, .dtv7_if_freq_khz = TDA10048_IF_4500, .dtv8_if_freq_khz = TDA10048_IF_5000, .clk_freq_khz = TDA10048_CLK_16000, .no_firmware = 1, .set_pll = true , .pll_m = 5, .pll_n = 3, .pll_p = 0, }; static struct tda827x_config tda827x_config = { .config = 0, }; static int ttusb2_frontend_tda10086_attach(struct dvb_usb_adapter *adap) { if (usb_set_interface(adap->dev->udev,0,3) < 0) err("set interface to alts=3 failed"); if ((adap->fe_adap[0].fe = dvb_attach(tda10086_attach, &tda10086_config, &adap->dev->i2c_adap)) == NULL) { deb_info("TDA10086 attach failed\n"); return -ENODEV; } return 0; } static int ttusb2_ct3650_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct dvb_usb_adapter *adap = fe->dvb->priv; return 
adap->fe_adap[0].fe->ops.i2c_gate_ctrl(adap->fe_adap[0].fe, enable); } static int ttusb2_frontend_tda10023_attach(struct dvb_usb_adapter *adap) { if (usb_set_interface(adap->dev->udev, 0, 3) < 0) err("set interface to alts=3 failed"); if (adap->fe_adap[0].fe == NULL) { /* FE 0 DVB-C */ adap->fe_adap[0].fe = dvb_attach(tda10023_attach, &tda10023_config, &adap->dev->i2c_adap, 0x48); if (adap->fe_adap[0].fe == NULL) { deb_info("TDA10023 attach failed\n"); return -ENODEV; } tt3650_ci_init(adap); } else { adap->fe_adap[1].fe = dvb_attach(tda10048_attach, &tda10048_config, &adap->dev->i2c_adap); if (adap->fe_adap[1].fe == NULL) { deb_info("TDA10048 attach failed\n"); return -ENODEV; } /* tuner is behind TDA10023 I2C-gate */ adap->fe_adap[1].fe->ops.i2c_gate_ctrl = ttusb2_ct3650_i2c_gate_ctrl; } return 0; } static int ttusb2_tuner_tda827x_attach(struct dvb_usb_adapter *adap) { struct dvb_frontend *fe; /* MFE: select correct FE to attach tuner since that's called twice */ if (adap->fe_adap[1].fe == NULL) fe = adap->fe_adap[0].fe; else fe = adap->fe_adap[1].fe; /* attach tuner */ if (dvb_attach(tda827x_attach, fe, 0x61, &adap->dev->i2c_adap, &tda827x_config) == NULL) { printk(KERN_ERR "%s: No tda827x found!\n", __func__); return -ENODEV; } return 0; } static int ttusb2_tuner_tda826x_attach(struct dvb_usb_adapter *adap) { if (dvb_attach(tda826x_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, 0) == NULL) { deb_info("TDA8263 attach failed\n"); return -ENODEV; } if (dvb_attach(lnbp21_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, 0, 0) == NULL) { deb_info("LNBP21 attach failed\n"); return -ENODEV; } return 0; } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties ttusb2_properties; static struct dvb_usb_device_properties ttusb2_properties_s2400; static struct dvb_usb_device_properties ttusb2_properties_ct3650; static void ttusb2_usb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); tt3650_ci_uninit(d); dvb_usb_device_exit(intf); } static int ttusb2_probe(struct usb_interface *intf, const struct usb_device_id *id) { if (0 == dvb_usb_device_init(intf, &ttusb2_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &ttusb2_properties_s2400, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &ttusb2_properties_ct3650, THIS_MODULE, NULL, adapter_nr)) return 0; return -ENODEV; } enum { PINNACLE_PCTV_400E, PINNACLE_PCTV_450E, TECHNOTREND_CONNECT_S2400, TECHNOTREND_CONNECT_CT3650, TECHNOTREND_CONNECT_S2400_8KEEPROM, }; static const struct usb_device_id ttusb2_table[] = { DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_400E), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_450E), DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2400), DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_CT3650), DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2400_8KEEPROM), { } }; MODULE_DEVICE_TABLE (usb, ttusb2_table); static struct dvb_usb_device_properties ttusb2_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-pctv-400e-01.fw", .size_of_priv = sizeof(struct ttusb2_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = NULL, // ttusb2_streaming_ctrl, .frontend_attach = ttusb2_frontend_tda10086_attach, .tuner_attach = ttusb2_tuner_tda826x_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_ISOC, .count = 5, .endpoint = 0x02, .u = { .isoc = { .framesperurb = 4, .framesize = 940, .interval = 1, } } } }}, } }, .power_ctrl = ttusb2_power_ctrl, .identify_state 
= ttusb2_identify_state, .i2c_algo = &ttusb2_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 2, .devices = { { "Pinnacle 400e DVB-S USB2.0", { &ttusb2_table[PINNACLE_PCTV_400E], NULL }, { NULL }, }, { "Pinnacle 450e DVB-S USB2.0", { &ttusb2_table[PINNACLE_PCTV_450E], NULL }, { NULL }, }, } }; static struct dvb_usb_device_properties ttusb2_properties_s2400 = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-tt-s2400-01.fw", .size_of_priv = sizeof(struct ttusb2_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = NULL, .frontend_attach = ttusb2_frontend_tda10086_attach, .tuner_attach = ttusb2_tuner_tda826x_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_ISOC, .count = 5, .endpoint = 0x02, .u = { .isoc = { .framesperurb = 4, .framesize = 940, .interval = 1, } } } }}, } }, .power_ctrl = ttusb2_power_ctrl, .identify_state = ttusb2_identify_state, .i2c_algo = &ttusb2_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 2, .devices = { { "Technotrend TT-connect S-2400", { &ttusb2_table[TECHNOTREND_CONNECT_S2400], NULL }, { NULL }, }, { "Technotrend TT-connect S-2400 (8kB EEPROM)", { &ttusb2_table[TECHNOTREND_CONNECT_S2400_8KEEPROM], NULL }, { NULL }, }, } }; static struct dvb_usb_device_properties ttusb2_properties_ct3650 = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .size_of_priv = sizeof(struct ttusb2_state), .rc.core = { .rc_interval = 150, /* Less than IR_KEYPRESS_TIMEOUT */ .rc_codes = RC_MAP_TT_1500, .rc_query = tt3650_rc_query, .allowed_protos = RC_PROTO_BIT_RC5, }, .num_adapters = 1, .adapter = { { .num_frontends = 2, .fe = {{ .streaming_ctrl = NULL, .frontend_attach = ttusb2_frontend_tda10023_attach, .tuner_attach = ttusb2_tuner_tda827x_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_ISOC, .count = 5, .endpoint = 0x02, .u = { .isoc = { .framesperurb = 4, .framesize = 940, .interval = 1, } } } }, { .streaming_ctrl = NULL, .frontend_attach = ttusb2_frontend_tda10023_attach, .tuner_attach = ttusb2_tuner_tda827x_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_ISOC, .count = 5, .endpoint = 0x02, .u = { .isoc = { .framesperurb = 4, .framesize = 940, .interval = 1, } } } }}, }, }, .power_ctrl = ttusb2_power_ctrl, .identify_state = ttusb2_identify_state, .i2c_algo = &ttusb2_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "Technotrend TT-connect CT-3650", .warm_ids = { &ttusb2_table[TECHNOTREND_CONNECT_CT3650], NULL }, }, } }; static struct usb_driver ttusb2_driver = { .name = "dvb_usb_ttusb2", .probe = ttusb2_probe, .disconnect = ttusb2_usb_disconnect, .id_table = ttusb2_table, }; module_usb_driver(ttusb2_driver); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); |
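/*
 * Illustrative sketch, not part of the driver above: the framing that
 * ttusb2_msg() exchanges over the generic bulk control endpoint, as
 * inferred from that function. Requests start with 0xaa, replies with
 * 0x55; the struct name is invented for this example.
 */
#include <linux/types.h>

struct ttusb2_frame_hdr {
	u8 magic;	/* 0xaa in a request, 0x55 in the device's reply */
	u8 id;		/* sequence number, echoed back by the device */
	u8 cmd;		/* command byte, echoed back by the device */
	u8 len;		/* number of payload bytes following this header */
	u8 payload[];	/* wlen bytes out, rlen bytes back */
} __packed;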
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2011 Instituto Nokia de Tecnologia
 *
 * Authors:
 *    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 *    Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 */

#include <linux/nfc.h>
#include <linux/module.h>

#include "nfc.h"

static DEFINE_RWLOCK(proto_tab_lock);
static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX];

static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
			   int kern)
{
	int rc = -EPROTONOSUPPORT;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= NFC_SOCKPROTO_MAX)
		return -EINVAL;

	read_lock(&proto_tab_lock);
	if (proto_tab[proto] && try_module_get(proto_tab[proto]->owner)) {
		rc = proto_tab[proto]->create(net, sock, proto_tab[proto], kern);
		module_put(proto_tab[proto]->owner);
	}
	read_unlock(&proto_tab_lock);

	return rc;
}

static const struct net_proto_family nfc_sock_family_ops = {
	.owner  = THIS_MODULE,
	.family = PF_NFC,
	.create = nfc_sock_create,
};

int nfc_proto_register(const struct nfc_protocol *nfc_proto)
{
	int rc;

	if (nfc_proto->id < 0 || nfc_proto->id >= NFC_SOCKPROTO_MAX)
		return -EINVAL;

	rc = proto_register(nfc_proto->proto, 0);
	if (rc)
		return rc;

	write_lock(&proto_tab_lock);
	if (proto_tab[nfc_proto->id])
		rc = -EBUSY;
	else
		proto_tab[nfc_proto->id] = nfc_proto;
	write_unlock(&proto_tab_lock);

	if (rc)
		proto_unregister(nfc_proto->proto);

	return rc;
}
EXPORT_SYMBOL(nfc_proto_register);

void nfc_proto_unregister(const struct nfc_protocol *nfc_proto)
{
	write_lock(&proto_tab_lock);
	proto_tab[nfc_proto->id] = NULL;
	write_unlock(&proto_tab_lock);

	proto_unregister(nfc_proto->proto);
}
EXPORT_SYMBOL(nfc_proto_unregister);

int __init af_nfc_init(void)
{
	return sock_register(&nfc_sock_family_ops);
}

void __exit af_nfc_exit(void)
{
	sock_unregister(PF_NFC);
}
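/*
 * Illustrative sketch, not part of af_nfc.c above: how a protocol module
 * (modelled loosely on the in-tree NFC socket protocols) would plug into
 * nfc_proto_register()/nfc_proto_unregister(). All example_* names are
 * invented; struct nfc_protocol is assumed to match net/nfc/nfc.h.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <net/sock.h>

#include "nfc.h"

static struct proto example_proto = {
	.name     = "NFC_EXAMPLE",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static int example_create(struct net *net, struct socket *sock,
			  const struct nfc_protocol *nfc_proto, int kern)
{
	/* A real protocol would allocate and initialise its socket here. */
	return -EOPNOTSUPP;
}

static const struct nfc_protocol example_nfc_proto = {
	.id     = NFC_SOCKPROTO_RAW,	/* a real module would use its own id */
	.proto  = &example_proto,
	.owner  = THIS_MODULE,
	.create = example_create,
};

static int __init example_nfc_init(void)
{
	return nfc_proto_register(&example_nfc_proto);
}

static void __exit example_nfc_exit(void)
{
	nfc_proto_unregister(&example_nfc_proto);
}

module_init(example_nfc_init);
module_exit(example_nfc_exit);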
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fd transport layer.  Includes deprecated socket layer.
 *
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#include <linux/syscalls.h> /* killme */

#define P9_PORT 564
#define MAX_SOCK_BUF (1024*1024)
#define MAXPOLLWADDR	2

static struct p9_trans_module p9_tcp_trans;
static struct p9_trans_module p9_fd_trans;

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 * @privport: port is privileged
 */
struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
	bool privport;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
	/* Options that take no arguments */
	Opt_privport,
};

static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_entry_t wait;
	wait_queue_head_t *wait_addr;
};

/**
* struct p9_conn - fd mux connection state information * @mux_list: list link for mux to manage multiple connections (?) * @client: reference to client instance for this connection * @err: error state * @req_lock: lock protecting req_list and requests statuses * @req_list: accounting for requests which have been sent * @unsent_req_list: accounting for requests that haven't been sent * @rreq: read request * @wreq: write request * @tmp_buf: temporary buffer to read in header * @rc: temporary fcall for reading current frame * @wpos: write position for current frame * @wsize: amount of data to write for current frame * @wbuf: current write buffer * @poll_pending_link: pending links to be polled per conn * @poll_wait: array of wait_q's for various worker threads * @pt: poll state * @rq: current read work * @wq: current write work * @wsched: ???? * */ struct p9_conn { struct list_head mux_list; struct p9_client *client; int err; spinlock_t req_lock; struct list_head req_list; struct list_head unsent_req_list; struct p9_req_t *rreq; struct p9_req_t *wreq; char tmp_buf[P9_HDRSZ]; struct p9_fcall rc; int wpos; int wsize; char *wbuf; struct list_head poll_pending_link; struct p9_poll_wait poll_wait[MAXPOLLWADDR]; poll_table pt; struct work_struct rq; struct work_struct wq; unsigned long wsched; }; /** * struct p9_trans_fd - transport state * @rd: reference to file to read from * @wr: reference of file to write to * @conn: connection state reference * */ struct p9_trans_fd { struct file *rd; struct file *wr; struct p9_conn conn; }; static void p9_poll_workfn(struct work_struct *work); static DEFINE_SPINLOCK(p9_poll_lock); static LIST_HEAD(p9_poll_pending_list); static DECLARE_WORK(p9_poll_work, p9_poll_workfn); static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT; static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT; static void p9_mux_poll_stop(struct p9_conn *m) { unsigned long flags; int i; for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { struct p9_poll_wait *pwait = &m->poll_wait[i]; if (pwait->wait_addr) { remove_wait_queue(pwait->wait_addr, &pwait->wait); pwait->wait_addr = NULL; } } spin_lock_irqsave(&p9_poll_lock, flags); list_del_init(&m->poll_pending_link); spin_unlock_irqrestore(&p9_poll_lock, flags); flush_work(&p9_poll_work); } /** * p9_conn_cancel - cancel all pending requests with error * @m: mux data * @err: error code * */ static void p9_conn_cancel(struct p9_conn *m, int err) { struct p9_req_t *req, *rtmp; LIST_HEAD(cancel_list); p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); spin_lock(&m->req_lock); if (READ_ONCE(m->err)) { spin_unlock(&m->req_lock); return; } WRITE_ONCE(m->err, err); ASSERT_EXCLUSIVE_WRITER(m->err); list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { list_move(&req->req_list, &cancel_list); WRITE_ONCE(req->status, REQ_STATUS_ERROR); } list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { list_move(&req->req_list, &cancel_list); WRITE_ONCE(req->status, REQ_STATUS_ERROR); } spin_unlock(&m->req_lock); list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); list_del(&req->req_list); if (!req->t_err) req->t_err = err; p9_client_cb(m->client, req, REQ_STATUS_ERROR); } } static __poll_t p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err) { __poll_t ret; struct p9_trans_fd *ts = NULL; if (client && client->status == Connected) ts = client->trans; if (!ts) { if (err) *err = -EREMOTEIO; return EPOLLERR; } ret = vfs_poll(ts->rd, pt); if (ts->rd 
!= ts->wr) ret = (ret & ~EPOLLOUT) | (vfs_poll(ts->wr, pt) & ~EPOLLIN); return ret; } /** * p9_fd_read- read from a fd * @client: client instance * @v: buffer to receive data into * @len: size of receive buffer * */ static int p9_fd_read(struct p9_client *client, void *v, int len) { int ret; struct p9_trans_fd *ts = NULL; loff_t pos; if (client && client->status != Disconnected) ts = client->trans; if (!ts) return -EREMOTEIO; if (!(ts->rd->f_flags & O_NONBLOCK)) p9_debug(P9_DEBUG_ERROR, "blocking read ...\n"); pos = ts->rd->f_pos; ret = kernel_read(ts->rd, v, len, &pos); if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) client->status = Disconnected; return ret; } /** * p9_read_work - called when there is some data to be read from a transport * @work: container of work to be done * */ static void p9_read_work(struct work_struct *work) { __poll_t n; int err; struct p9_conn *m; m = container_of(work, struct p9_conn, rq); if (READ_ONCE(m->err) < 0) return; p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset); if (!m->rc.sdata) { m->rc.sdata = m->tmp_buf; m->rc.offset = 0; m->rc.capacity = P9_HDRSZ; /* start by reading header */ } clear_bit(Rpending, &m->wsched); p9_debug(P9_DEBUG_TRANS, "read mux %p pos %zd size: %zd = %zd\n", m, m->rc.offset, m->rc.capacity, m->rc.capacity - m->rc.offset); err = p9_fd_read(m->client, m->rc.sdata + m->rc.offset, m->rc.capacity - m->rc.offset); p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err); if (err == -EAGAIN) goto end_clear; if (err <= 0) goto error; m->rc.offset += err; /* header read in */ if ((!m->rreq) && (m->rc.offset == m->rc.capacity)) { p9_debug(P9_DEBUG_TRANS, "got new header\n"); /* Header size */ m->rc.size = P9_HDRSZ; err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0); if (err) { p9_debug(P9_DEBUG_ERROR, "error parsing header: %d\n", err); goto error; } p9_debug(P9_DEBUG_TRANS, "mux %p pkt: size: %d bytes tag: %d\n", m, m->rc.size, m->rc.tag); m->rreq = p9_tag_lookup(m->client, m->rc.tag); if (!m->rreq || (m->rreq->status != REQ_STATUS_SENT)) { p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", m->rc.tag); err = -EIO; goto error; } if (m->rc.size > m->rreq->rc.capacity) { p9_debug(P9_DEBUG_ERROR, "requested packet size too big: %d for tag %d with capacity %zd\n", m->rc.size, m->rc.tag, m->rreq->rc.capacity); err = -EIO; goto error; } if (!m->rreq->rc.sdata) { p9_debug(P9_DEBUG_ERROR, "No recv fcall for tag %d (req %p), disconnecting!\n", m->rc.tag, m->rreq); p9_req_put(m->client, m->rreq); m->rreq = NULL; err = -EIO; goto error; } m->rc.sdata = m->rreq->rc.sdata; memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity); m->rc.capacity = m->rc.size; } /* packet is read in * not an else because some packets (like clunk) have no payload */ if ((m->rreq) && (m->rc.offset == m->rc.capacity)) { p9_debug(P9_DEBUG_TRANS, "got new packet\n"); m->rreq->rc.size = m->rc.offset; spin_lock(&m->req_lock); if (m->rreq->status == REQ_STATUS_SENT) { list_del(&m->rreq->req_list); p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD); } else if (m->rreq->status == REQ_STATUS_FLSHD) { /* Ignore replies associated with a cancelled request. 
*/ p9_debug(P9_DEBUG_TRANS, "Ignore replies associated with a cancelled request\n"); } else { spin_unlock(&m->req_lock); p9_debug(P9_DEBUG_ERROR, "Request tag %d errored out while we were reading the reply\n", m->rc.tag); err = -EIO; goto error; } spin_unlock(&m->req_lock); m->rc.sdata = NULL; m->rc.offset = 0; m->rc.capacity = 0; p9_req_put(m->client, m->rreq); m->rreq = NULL; } end_clear: clear_bit(Rworksched, &m->wsched); if (!list_empty(&m->req_list)) { if (test_and_clear_bit(Rpending, &m->wsched)) n = EPOLLIN; else n = p9_fd_poll(m->client, NULL, NULL); if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); schedule_work(&m->rq); } } return; error: p9_conn_cancel(m, err); clear_bit(Rworksched, &m->wsched); } /** * p9_fd_write - write to a socket * @client: client instance * @v: buffer to send data from * @len: size of send buffer * */ static int p9_fd_write(struct p9_client *client, void *v, int len) { ssize_t ret; struct p9_trans_fd *ts = NULL; if (client && client->status != Disconnected) ts = client->trans; if (!ts) return -EREMOTEIO; if (!(ts->wr->f_flags & O_NONBLOCK)) p9_debug(P9_DEBUG_ERROR, "blocking write ...\n"); ret = kernel_write(ts->wr, v, len, &ts->wr->f_pos); if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) client->status = Disconnected; return ret; } /** * p9_write_work - called when a transport can send some data * @work: container for work to be done * */ static void p9_write_work(struct work_struct *work) { __poll_t n; int err; struct p9_conn *m; struct p9_req_t *req; m = container_of(work, struct p9_conn, wq); if (READ_ONCE(m->err) < 0) { clear_bit(Wworksched, &m->wsched); return; } if (!m->wsize) { spin_lock(&m->req_lock); if (list_empty(&m->unsent_req_list)) { clear_bit(Wworksched, &m->wsched); spin_unlock(&m->req_lock); return; } req = list_entry(m->unsent_req_list.next, struct p9_req_t, req_list); WRITE_ONCE(req->status, REQ_STATUS_SENT); p9_debug(P9_DEBUG_TRANS, "move req %p\n", req); list_move_tail(&req->req_list, &m->req_list); m->wbuf = req->tc.sdata; m->wsize = req->tc.size; m->wpos = 0; p9_req_get(req); m->wreq = req; spin_unlock(&m->req_lock); } p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos, m->wsize); clear_bit(Wpending, &m->wsched); err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos); p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err); if (err == -EAGAIN) goto end_clear; if (err < 0) goto error; else if (err == 0) { err = -EREMOTEIO; goto error; } m->wpos += err; if (m->wpos == m->wsize) { m->wpos = m->wsize = 0; p9_req_put(m->client, m->wreq); m->wreq = NULL; } end_clear: clear_bit(Wworksched, &m->wsched); if (m->wsize || !list_empty(&m->unsent_req_list)) { if (test_and_clear_bit(Wpending, &m->wsched)) n = EPOLLOUT; else n = p9_fd_poll(m->client, NULL, NULL); if ((n & EPOLLOUT) && !test_and_set_bit(Wworksched, &m->wsched)) { p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); schedule_work(&m->wq); } } return; error: p9_conn_cancel(m, err); clear_bit(Wworksched, &m->wsched); } static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) { struct p9_poll_wait *pwait = container_of(wait, struct p9_poll_wait, wait); struct p9_conn *m = pwait->conn; unsigned long flags; spin_lock_irqsave(&p9_poll_lock, flags); if (list_empty(&m->poll_pending_link)) list_add_tail(&m->poll_pending_link, &p9_poll_pending_list); spin_unlock_irqrestore(&p9_poll_lock, flags); schedule_work(&p9_poll_work); return 1; } /** * p9_pollwait - 
add poll task to the wait queue * @filp: file pointer being polled * @wait_address: wait_q to block on * @p: poll state * * called by files poll operation to add v9fs-poll task to files wait queue */ static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) { struct p9_conn *m = container_of(p, struct p9_conn, pt); struct p9_poll_wait *pwait = NULL; int i; for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { if (m->poll_wait[i].wait_addr == NULL) { pwait = &m->poll_wait[i]; break; } } if (!pwait) { p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n"); return; } pwait->conn = m; pwait->wait_addr = wait_address; init_waitqueue_func_entry(&pwait->wait, p9_pollwake); add_wait_queue(wait_address, &pwait->wait); } /** * p9_conn_create - initialize the per-session mux data * @client: client instance * * Note: Creates the polling task if this is the first session. */ static void p9_conn_create(struct p9_client *client) { __poll_t n; struct p9_trans_fd *ts = client->trans; struct p9_conn *m = &ts->conn; p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize); INIT_LIST_HEAD(&m->mux_list); m->client = client; spin_lock_init(&m->req_lock); INIT_LIST_HEAD(&m->req_list); INIT_LIST_HEAD(&m->unsent_req_list); INIT_WORK(&m->rq, p9_read_work); INIT_WORK(&m->wq, p9_write_work); INIT_LIST_HEAD(&m->poll_pending_link); init_poll_funcptr(&m->pt, p9_pollwait); n = p9_fd_poll(client, &m->pt, NULL); if (n & EPOLLIN) { p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); set_bit(Rpending, &m->wsched); } if (n & EPOLLOUT) { p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); set_bit(Wpending, &m->wsched); } } /** * p9_poll_mux - polls a mux and schedules read or write works if necessary * @m: connection to poll * */ static void p9_poll_mux(struct p9_conn *m) { __poll_t n; int err = -ECONNRESET; if (READ_ONCE(m->err) < 0) return; n = p9_fd_poll(m->client, NULL, &err); if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) { p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n); p9_conn_cancel(m, err); } if (n & EPOLLIN) { set_bit(Rpending, &m->wsched); p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); if (!test_and_set_bit(Rworksched, &m->wsched)) { p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); schedule_work(&m->rq); } } if (n & EPOLLOUT) { set_bit(Wpending, &m->wsched); p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); if ((m->wsize || !list_empty(&m->unsent_req_list)) && !test_and_set_bit(Wworksched, &m->wsched)) { p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); schedule_work(&m->wq); } } } /** * p9_fd_request - send 9P request * The function can sleep until the request is scheduled for sending. * The function can be interrupted. Return from the function is not * a guarantee that the request is sent successfully. 
* * @client: client instance * @req: request to be sent * */ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req) { __poll_t n; int err; struct p9_trans_fd *ts = client->trans; struct p9_conn *m = &ts->conn; p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m, current, &req->tc, req->tc.id); spin_lock(&m->req_lock); err = READ_ONCE(m->err); if (err < 0) { spin_unlock(&m->req_lock); return err; } WRITE_ONCE(req->status, REQ_STATUS_UNSENT); list_add_tail(&req->req_list, &m->unsent_req_list); spin_unlock(&m->req_lock); if (test_and_clear_bit(Wpending, &m->wsched)) n = EPOLLOUT; else n = p9_fd_poll(m->client, NULL, NULL); if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) schedule_work(&m->wq); return 0; } static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req) { struct p9_trans_fd *ts = client->trans; struct p9_conn *m = &ts->conn; int ret = 1; p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); spin_lock(&m->req_lock); if (req->status == REQ_STATUS_UNSENT) { list_del(&req->req_list); WRITE_ONCE(req->status, REQ_STATUS_FLSHD); p9_req_put(client, req); ret = 0; } spin_unlock(&m->req_lock); return ret; } static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) { struct p9_trans_fd *ts = client->trans; struct p9_conn *m = &ts->conn; p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); spin_lock(&m->req_lock); /* Ignore cancelled request if status changed since the request was * processed in p9_client_flush() */ if (req->status != REQ_STATUS_SENT) { spin_unlock(&m->req_lock); return 0; } /* we haven't received a response for oldreq, * remove it from the list. */ list_del(&req->req_list); WRITE_ONCE(req->status, REQ_STATUS_FLSHD); spin_unlock(&m->req_lock); p9_req_put(client, req); return 0; } static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt) { if (clnt->trans_mod == &p9_tcp_trans) { if (clnt->trans_opts.tcp.port != P9_PORT) seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port); } else if (clnt->trans_mod == &p9_fd_trans) { if (clnt->trans_opts.fd.rfd != ~0) seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd); if (clnt->trans_opts.fd.wfd != ~0) seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd); } return 0; } /** * parse_opts - parse mount options into p9_fd_opts structure * @params: options string passed from mount * @opts: fd transport-specific structure to parse options into * * Returns 0 upon success, -ERRNO upon failure */ static int parse_opts(char *params, struct p9_fd_opts *opts) { char *p; substring_t args[MAX_OPT_ARGS]; int option; char *options, *tmp_options; opts->port = P9_PORT; opts->rfd = ~0; opts->wfd = ~0; opts->privport = false; if (!params) return 0; tmp_options = kstrdup(params, GFP_KERNEL); if (!tmp_options) { p9_debug(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM; } options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token; int r; if (!*p) continue; token = match_token(p, tokens, args); if ((token != Opt_err) && (token != Opt_privport)) { r = match_int(&args[0], &option); if (r < 0) { p9_debug(P9_DEBUG_ERROR, "integer field, but no integer?\n"); continue; } } switch (token) { case Opt_port: opts->port = option; break; case Opt_rfdno: opts->rfd = option; break; case Opt_wfdno: opts->wfd = option; break; case Opt_privport: opts->privport = true; break; default: continue; } } kfree(tmp_options); return 0; } static int p9_fd_open(struct p9_client *client, int rfd, int wfd) { struct p9_trans_fd *ts = 
kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); if (!ts) return -ENOMEM; ts->rd = fget(rfd); if (!ts->rd) goto out_free_ts; if (!(ts->rd->f_mode & FMODE_READ)) goto out_put_rd; /* Prevent workers from hanging on IO when fd is a pipe. * It's technically possible for userspace or concurrent mounts to * modify this flag concurrently, which will likely result in a * broken filesystem. However, just having bad flags here should * not crash the kernel or cause any other sort of bug, so mark this * particular data race as intentional so that tooling (like KCSAN) * can allow it and detect further problems. */ data_race(ts->rd->f_flags |= O_NONBLOCK); ts->wr = fget(wfd); if (!ts->wr) goto out_put_rd; if (!(ts->wr->f_mode & FMODE_WRITE)) goto out_put_wr; data_race(ts->wr->f_flags |= O_NONBLOCK); client->trans = ts; client->status = Connected; return 0; out_put_wr: fput(ts->wr); out_put_rd: fput(ts->rd); out_free_ts: kfree(ts); return -EIO; } static int p9_socket_open(struct p9_client *client, struct socket *csocket) { struct p9_trans_fd *p; struct file *file; p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); if (!p) { sock_release(csocket); return -ENOMEM; } csocket->sk->sk_allocation = GFP_NOIO; csocket->sk->sk_use_task_frag = false; file = sock_alloc_file(csocket, 0, NULL); if (IS_ERR(file)) { pr_err("%s (%d): failed to map fd\n", __func__, task_pid_nr(current)); kfree(p); return PTR_ERR(file); } get_file(file); p->wr = p->rd = file; client->trans = p; client->status = Connected; p->rd->f_flags |= O_NONBLOCK; p9_conn_create(client); return 0; } /** * p9_conn_destroy - cancels all pending requests of mux * @m: mux to destroy * */ static void p9_conn_destroy(struct p9_conn *m) { p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m, m->mux_list.prev, m->mux_list.next); p9_mux_poll_stop(m); cancel_work_sync(&m->rq); if (m->rreq) { p9_req_put(m->client, m->rreq); m->rreq = NULL; } cancel_work_sync(&m->wq); if (m->wreq) { p9_req_put(m->client, m->wreq); m->wreq = NULL; } p9_conn_cancel(m, -ECONNRESET); m->client = NULL; } /** * p9_fd_close - shutdown file descriptor transport * @client: client instance * */ static void p9_fd_close(struct p9_client *client) { struct p9_trans_fd *ts; if (!client) return; ts = client->trans; if (!ts) return; client->status = Disconnected; p9_conn_destroy(&ts->conn); if (ts->rd) fput(ts->rd); if (ts->wr) fput(ts->wr); kfree(ts); } static int p9_bind_privport(struct socket *sock) { struct sockaddr_storage stor = { 0 }; int port, err = -EINVAL; stor.ss_family = sock->ops->family; if (stor.ss_family == AF_INET) ((struct sockaddr_in *)&stor)->sin_addr.s_addr = htonl(INADDR_ANY); else ((struct sockaddr_in6 *)&stor)->sin6_addr = in6addr_any; for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) { if (stor.ss_family == AF_INET) ((struct sockaddr_in *)&stor)->sin_port = htons((ushort)port); else ((struct sockaddr_in6 *)&stor)->sin6_port = htons((ushort)port); err = kernel_bind(sock, (struct sockaddr *)&stor, sizeof(stor)); if (err != -EADDRINUSE) break; } return err; } static int p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) { int err; char port_str[6]; struct socket *csocket; struct sockaddr_storage stor = { 0 }; struct p9_fd_opts opts; err = parse_opts(args, &opts); if (err < 0) return err; if (!addr) return -EINVAL; sprintf(port_str, "%u", opts.port); err = inet_pton_with_scope(current->nsproxy->net_ns, AF_UNSPEC, addr, port_str, &stor); if (err < 0) return err; csocket = NULL; client->trans_opts.tcp.port = opts.port; 
client->trans_opts.tcp.privport = opts.privport; err = __sock_create(current->nsproxy->net_ns, stor.ss_family, SOCK_STREAM, IPPROTO_TCP, &csocket, 1); if (err) { pr_err("%s (%d): problem creating socket\n", __func__, task_pid_nr(current)); return err; } if (opts.privport) { err = p9_bind_privport(csocket); if (err < 0) { pr_err("%s (%d): problem binding to privport\n", __func__, task_pid_nr(current)); sock_release(csocket); return err; } } err = READ_ONCE(csocket->ops)->connect(csocket, (struct sockaddr *)&stor, sizeof(stor), 0); if (err < 0) { pr_err("%s (%d): problem connecting socket to %s\n", __func__, task_pid_nr(current), addr); sock_release(csocket); return err; } return p9_socket_open(client, csocket); } static int p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) { int err; struct socket *csocket; struct sockaddr_un sun_server; csocket = NULL; if (!addr || !strlen(addr)) return -EINVAL; if (strlen(addr) >= UNIX_PATH_MAX) { pr_err("%s (%d): address too long: %s\n", __func__, task_pid_nr(current), addr); return -ENAMETOOLONG; } sun_server.sun_family = PF_UNIX; strcpy(sun_server.sun_path, addr); err = __sock_create(current->nsproxy->net_ns, PF_UNIX, SOCK_STREAM, 0, &csocket, 1); if (err < 0) { pr_err("%s (%d): problem creating socket\n", __func__, task_pid_nr(current)); return err; } err = READ_ONCE(csocket->ops)->connect(csocket, (struct sockaddr *)&sun_server, sizeof(struct sockaddr_un) - 1, 0); if (err < 0) { pr_err("%s (%d): problem connecting socket: %s: %d\n", __func__, task_pid_nr(current), addr, err); sock_release(csocket); return err; } return p9_socket_open(client, csocket); } static int p9_fd_create(struct p9_client *client, const char *addr, char *args) { int err; struct p9_fd_opts opts; err = parse_opts(args, &opts); if (err < 0) return err; client->trans_opts.fd.rfd = opts.rfd; client->trans_opts.fd.wfd = opts.wfd; if (opts.rfd == ~0 || opts.wfd == ~0) { pr_err("Insufficient options for proto=fd\n"); return -ENOPROTOOPT; } err = p9_fd_open(client, opts.rfd, opts.wfd); if (err < 0) return err; p9_conn_create(client); return 0; } static struct p9_trans_module p9_tcp_trans = { .name = "tcp", .maxsize = MAX_SOCK_BUF, .pooled_rbuffers = false, .def = 0, .create = p9_fd_create_tcp, .close = p9_fd_close, .request = p9_fd_request, .cancel = p9_fd_cancel, .cancelled = p9_fd_cancelled, .show_options = p9_fd_show_options, .owner = THIS_MODULE, }; MODULE_ALIAS_9P("tcp"); static struct p9_trans_module p9_unix_trans = { .name = "unix", .maxsize = MAX_SOCK_BUF, .def = 0, .create = p9_fd_create_unix, .close = p9_fd_close, .request = p9_fd_request, .cancel = p9_fd_cancel, .cancelled = p9_fd_cancelled, .show_options = p9_fd_show_options, .owner = THIS_MODULE, }; MODULE_ALIAS_9P("unix"); static struct p9_trans_module p9_fd_trans = { .name = "fd", .maxsize = MAX_SOCK_BUF, .def = 0, .create = p9_fd_create, .close = p9_fd_close, .request = p9_fd_request, .cancel = p9_fd_cancel, .cancelled = p9_fd_cancelled, .show_options = p9_fd_show_options, .owner = THIS_MODULE, }; MODULE_ALIAS_9P("fd"); /** * p9_poll_workfn - poll worker thread * @work: work queue * * polls all v9fs transports for new events and queues the appropriate * work to the work queue * */ static void p9_poll_workfn(struct work_struct *work) { unsigned long flags; p9_debug(P9_DEBUG_TRANS, "start %p\n", current); spin_lock_irqsave(&p9_poll_lock, flags); while (!list_empty(&p9_poll_pending_list)) { struct p9_conn *conn = list_first_entry(&p9_poll_pending_list, struct p9_conn, poll_pending_link); 
list_del_init(&conn->poll_pending_link); spin_unlock_irqrestore(&p9_poll_lock, flags); p9_poll_mux(conn); spin_lock_irqsave(&p9_poll_lock, flags); } spin_unlock_irqrestore(&p9_poll_lock, flags); p9_debug(P9_DEBUG_TRANS, "finish\n"); } static int __init p9_trans_fd_init(void) { v9fs_register_trans(&p9_tcp_trans); v9fs_register_trans(&p9_unix_trans); v9fs_register_trans(&p9_fd_trans); return 0; } static void __exit p9_trans_fd_exit(void) { flush_work(&p9_poll_work); v9fs_unregister_trans(&p9_tcp_trans); v9fs_unregister_trans(&p9_unix_trans); v9fs_unregister_trans(&p9_fd_trans); } module_init(p9_trans_fd_init); module_exit(p9_trans_fd_exit); MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); MODULE_DESCRIPTION("Filedescriptor Transport for 9P"); MODULE_LICENSE("GPL");
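All three transports above plug into the 9P client through the same small amount of glue: a filled-in struct p9_trans_module handed to v9fs_register_trans() at module init and removed with v9fs_unregister_trans() on exit. The skeleton below restates that pattern in isolation as a hedged sketch; the "example" transport name and its callback functions are hypothetical placeholders (their prototypes simply mirror the p9_fd_* callbacks in this file), not code that exists in the kernel.

/* Sketch: minimal boilerplate for a hypothetical 9P transport module. */
static int p9_example_create(struct p9_client *client, const char *addr, char *args);
static void p9_example_close(struct p9_client *client);
static int p9_example_request(struct p9_client *client, struct p9_req_t *req);
static int p9_example_cancel(struct p9_client *client, struct p9_req_t *req);

static struct p9_trans_module p9_example_trans = {
	.name = "example",		/* selected with mount -o trans=example */
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_example_create,	/* establish the connection */
	.close = p9_example_close,	/* tear it down */
	.request = p9_example_request,	/* queue one 9P request */
	.cancel = p9_example_cancel,	/* flush an unsent request */
	.owner = THIS_MODULE,
};

static int __init p9_example_init(void)
{
	v9fs_register_trans(&p9_example_trans);
	return 0;
}
module_init(p9_example_init);

static void __exit p9_example_exit(void)
{
	v9fs_unregister_trans(&p9_example_trans);
}
module_exit(p9_example_exit);

As with the tcp, unix and fd modules above, a MODULE_ALIAS_9P("example") line would let the 9P client autoload such a module when that trans= value is requested at mount time.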
// SPDX-License-Identifier: GPL-2.0-or-later /* * Force feedback support for Mayflash game controller adapters. * * These devices are manufactured by Mayflash but identify themselves * using the vendor ID of DragonRise Inc. * * Tested with: * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter" * 0079:1803 "DragonRise Inc. Mayflash Wireless Sensor DolphinBar" * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter" * 0079:1844 "DragonRise Inc. Mayflash GameCube Game Controller Adapter (v04)" * * The following adapters probably work too, but need to be tested: * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter" * * Copyright (c) 2016-2017 Marcel Hasler <mahasler@gmail.com> */ /* */ #include <linux/input.h> #include <linux/slab.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" struct mf_device { struct hid_report *report; }; static int mf_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct mf_device *mf = data; int strong, weak; strong = effect->u.rumble.strong_magnitude; weak = effect->u.rumble.weak_magnitude; dbg_hid("Called with 0x%04x 0x%04x.\n", strong, weak); strong = strong * 0xff / 0xffff; weak = weak * 0xff / 0xffff; dbg_hid("Running with 0x%02x 0x%02x.\n", strong, weak); mf->report->field[0]->value[0] = weak; mf->report->field[0]->value[1] = strong; hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT); return 0; } static int mf_init(struct hid_device *hid) { struct mf_device *mf; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct list_head *report_ptr; struct hid_report *report; struct list_head *input_ptr = &hid->inputs; struct hid_input *input; struct input_dev *dev; int error; /* Setup each of the four inputs */ list_for_each(report_ptr, report_list) { report = list_entry(report_ptr, struct hid_report, list); if (report->maxfield < 1 || report->field[0]->report_count < 2) { hid_err(hid, "Invalid report, this should never happen!\n"); return -ENODEV; } if (list_is_last(input_ptr, &hid->inputs)) { hid_err(hid, "Missing input, this should never happen!\n"); return -ENODEV; } input_ptr = input_ptr->next; input = list_entry(input_ptr, struct hid_input, list); mf = kzalloc(sizeof(struct mf_device), GFP_KERNEL); if (!mf) return -ENOMEM; dev = input->input; set_bit(FF_RUMBLE, dev->ffbit); error = input_ff_create_memless(dev, mf, mf_play); if (error) { kfree(mf); return error; } mf->report = report; mf->report->field[0]->value[0] = 0x00; mf->report->field[0]->value[1] = 0x00; hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT); } hid_info(hid, "Force feedback for HJZ Mayflash game controller " "adapters by Marcel Hasler <mahasler@gmail.com>\n"); return 0; } static int mf_probe(struct hid_device *hid, const struct hid_device_id *id) { int error; dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n"); /* Apply quirks as needed */ hid->quirks
|= id->driver_data; error = hid_parse(hid); if (error) { hid_err(hid, "HID parse failed.\n"); return error; } error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (error) { hid_err(hid, "HID hw start failed\n"); return error; } error = mf_init(hid); if (error) { hid_err(hid, "Force feedback init failed.\n"); hid_hw_stop(hid); return error; } return 0; } static const struct hid_device_id mf_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), .driver_data = HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), .driver_data = HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), .driver_data = HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2), .driver_data = 0 }, /* No quirk required */ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), .driver_data = HID_QUIRK_MULTI_INPUT }, { } }; MODULE_DEVICE_TABLE(hid, mf_devices); static struct hid_driver mf_driver = { .name = "hid_mf", .id_table = mf_devices, .probe = mf_probe, }; module_hid_driver(mf_driver); MODULE_DESCRIPTION("Force feedback support for Mayflash game controller adapters."); MODULE_LICENSE("GPL");
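mf_play() earlier in this file squeezes the 16-bit rumble magnitudes delivered by the memless force-feedback core into the single-byte values the adapter's output report expects, using an integer scale of 0xff/0xffff. The standalone userspace snippet below is only a worked restatement of that arithmetic; the mf_scale() helper is an illustrative name, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Same scaling as mf_play(): map 0x0000..0xffff onto 0x00..0xff. */
static uint8_t mf_scale(uint16_t magnitude)
{
	return (uint32_t)magnitude * 0xff / 0xffff;
}

int main(void)
{
	printf("full 0x%02x\n", mf_scale(0xffff));	/* prints 0xff */
	printf("half 0x%02x\n", mf_scale(0x8000));	/* prints 0x7f */
	printf("off  0x%02x\n", mf_scale(0x0000));	/* prints 0x00 */
	return 0;
}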
/* * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2009 HNR Consulting. All rights reserved. * Copyright (c) 2014,2018 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
* */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/security.h> #include <linux/xarray.h> #include <rdma/ib_cache.h> #include "mad_priv.h" #include "core_priv.h" #include "mad_rmpp.h" #include "smi.h" #include "opa_smi.h" #include "agent.h" #define CREATE_TRACE_POINTS #include <trace/events/ib_mad.h> #ifdef CONFIG_TRACEPOINTS static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_qp_info *qp_info, struct trace_event_raw_ib_mad_send_template *entry) { struct ib_ud_wr *wr = &mad_send_wr->send_wr; struct rdma_ah_attr attr = {}; rdma_query_ah(wr->ah, &attr); /* These are common */ entry->sl = attr.sl; entry->rqpn = wr->remote_qpn; entry->rqkey = wr->remote_qkey; entry->dlid = rdma_ah_get_dlid(&attr); } #endif static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; module_param_named(send_queue_size, mad_sendq_size, int, 0444); MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests"); module_param_named(recv_queue_size, mad_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); static DEFINE_XARRAY_ALLOC1(ib_mad_clients); static u32 ib_mad_client_next; static struct list_head ib_mad_port_list; /* Port list lock */ static DEFINE_SPINLOCK(ib_mad_port_list_lock); /* Forward declarations */ static int method_in_use(struct ib_mad_mgmt_method_table **method, struct ib_mad_reg_req *mad_reg_req); static void remove_mad_reg_req(struct ib_mad_agent_private *priv); static struct ib_mad_agent_private *find_mad_agent( struct ib_mad_port_private *port_priv, const struct ib_mad_hdr *mad); static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad); static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); static void timeout_sends(struct work_struct *work); static void local_completions(struct work_struct *work); static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv, u8 mgmt_class); static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv); static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, struct ib_wc *wc); static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc); /* * Returns a ib_mad_port_private structure or NULL for a device/port * Assumes ib_mad_port_list_lock is being held */ static inline struct ib_mad_port_private * __ib_get_mad_port(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *entry; list_for_each_entry(entry, &ib_mad_port_list, port_list) { if (entry->device == device && entry->port_num == port_num) return entry; } return NULL; } /* * Wrapper function to return a ib_mad_port_private structure or NULL * for a device/port */ static inline struct ib_mad_port_private * ib_get_mad_port(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *entry; unsigned long flags; spin_lock_irqsave(&ib_mad_port_list_lock, flags); entry = __ib_get_mad_port(device, port_num); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); return entry; } static inline u8 convert_mgmt_class(u8 mgmt_class) { /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */ return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ? 
0 : mgmt_class; } static int get_spl_qp_index(enum ib_qp_type qp_type) { switch (qp_type) { case IB_QPT_SMI: return 0; case IB_QPT_GSI: return 1; default: return -1; } } static int vendor_class_index(u8 mgmt_class) { return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START; } static int is_vendor_class(u8 mgmt_class) { if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) || (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END)) return 0; return 1; } static int is_vendor_oui(char *oui) { if (oui[0] || oui[1] || oui[2]) return 1; return 0; } static int is_vendor_method_in_use( struct ib_mad_mgmt_vendor_class *vendor_class, struct ib_mad_reg_req *mad_reg_req) { struct ib_mad_mgmt_method_table *method; int i; for (i = 0; i < MAX_MGMT_OUI; i++) { if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) { method = vendor_class->method_table[i]; if (method) { if (method_in_use(&method, mad_reg_req)) return 1; else break; } } } return 0; } int ib_response_mad(const struct ib_mad_hdr *hdr) { return ((hdr->method & IB_MGMT_METHOD_RESP) || (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); } EXPORT_SYMBOL(ib_response_mad); #define SOL_FC_MAX_DEFAULT_FRAC 4 #define SOL_FC_MAX_SA_FRAC 32 static int get_sol_fc_max_outstanding(struct ib_mad_reg_req *mad_reg_req) { if (!mad_reg_req) /* Send only agent */ return mad_recvq_size / SOL_FC_MAX_DEFAULT_FRAC; switch (mad_reg_req->mgmt_class) { case IB_MGMT_CLASS_CM: return mad_recvq_size / SOL_FC_MAX_DEFAULT_FRAC; case IB_MGMT_CLASS_SUBN_ADM: return mad_recvq_size / SOL_FC_MAX_SA_FRAC; case IB_MGMT_CLASS_SUBN_LID_ROUTED: case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: return min(mad_recvq_size, IB_MAD_QP_RECV_SIZE) / SOL_FC_MAX_DEFAULT_FRAC; default: return 0; } } /* * ib_register_mad_agent - Register to send/receive MADs * * Context: Process context. 
*/ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, u32 port_num, enum ib_qp_type qp_type, struct ib_mad_reg_req *mad_reg_req, u8 rmpp_version, ib_mad_send_handler send_handler, ib_mad_recv_handler recv_handler, void *context, u32 registration_flags) { struct ib_mad_port_private *port_priv; struct ib_mad_agent *ret = ERR_PTR(-EINVAL); struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_reg_req *reg_req = NULL; struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; struct ib_mad_mgmt_method_table *method; int ret2, qpn; u8 mgmt_class, vclass; if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) || (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num))) return ERR_PTR(-EPROTONOSUPPORT); /* Validate parameters */ qpn = get_spl_qp_index(qp_type); if (qpn == -1) { dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n", __func__, qp_type); goto error1; } if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) { dev_dbg_ratelimited(&device->dev, "%s: invalid RMPP Version %u\n", __func__, rmpp_version); goto error1; } /* Validate MAD registration request if supplied */ if (mad_reg_req) { if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { dev_dbg_ratelimited(&device->dev, "%s: invalid Class Version %u\n", __func__, mad_reg_req->mgmt_class_version); goto error1; } if (!recv_handler) { dev_dbg_ratelimited(&device->dev, "%s: no recv_handler\n", __func__); goto error1; } if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { /* * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only * one in this range currently allowed */ if (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { dev_dbg_ratelimited(&device->dev, "%s: Invalid Mgmt Class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } else if (mad_reg_req->mgmt_class == 0) { /* * Class 0 is reserved in IBA and is used for * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE */ dev_dbg_ratelimited(&device->dev, "%s: Invalid Mgmt Class 0\n", __func__); goto error1; } else if (is_vendor_class(mad_reg_req->mgmt_class)) { /* * If class is in "new" vendor range, * ensure supplied OUI is not zero */ if (!is_vendor_oui(mad_reg_req->oui)) { dev_dbg_ratelimited(&device->dev, "%s: No OUI specified for class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } /* Make sure class supplied is consistent with RMPP */ if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { if (rmpp_version) { dev_dbg_ratelimited(&device->dev, "%s: RMPP version for non-RMPP class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } /* Make sure class supplied is consistent with QP type */ if (qp_type == IB_QPT_SMI) { if ((mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) && (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { dev_dbg_ratelimited(&device->dev, "%s: Invalid SM QP type: class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } else { if ((mad_reg_req->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || (mad_reg_req->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { dev_dbg_ratelimited(&device->dev, "%s: Invalid GS QP type: class 0x%x\n", __func__, mad_reg_req->mgmt_class); goto error1; } } } else { /* No registration request supplied */ if (!send_handler) goto error1; if (registration_flags & IB_MAD_USER_RMPP) goto error1; } /* Validate device and port */ port_priv = ib_get_mad_port(device, port_num); if (!port_priv) { dev_dbg_ratelimited(&device->dev, "%s: Invalid port %u\n", __func__, 
port_num); ret = ERR_PTR(-ENODEV); goto error1; } /* Verify the QP requested is supported. For example, Ethernet devices * will not have QP0. */ if (!port_priv->qp_info[qpn].qp) { dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n", __func__, qpn); ret = ERR_PTR(-EPROTONOSUPPORT); goto error1; } /* Allocate structures */ mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); if (!mad_agent_priv) { ret = ERR_PTR(-ENOMEM); goto error1; } if (mad_reg_req) { reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL); if (!reg_req) { ret = ERR_PTR(-ENOMEM); goto error3; } } /* Now, fill in the various structures */ mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; mad_agent_priv->reg_req = reg_req; mad_agent_priv->agent.rmpp_version = rmpp_version; mad_agent_priv->agent.device = device; mad_agent_priv->agent.recv_handler = recv_handler; mad_agent_priv->agent.send_handler = send_handler; mad_agent_priv->agent.context = context; mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; mad_agent_priv->agent.port_num = port_num; mad_agent_priv->agent.flags = registration_flags; spin_lock_init(&mad_agent_priv->lock); INIT_LIST_HEAD(&mad_agent_priv->send_list); INIT_LIST_HEAD(&mad_agent_priv->wait_list); INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); INIT_LIST_HEAD(&mad_agent_priv->backlog_list); INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); INIT_LIST_HEAD(&mad_agent_priv->local_list); INIT_WORK(&mad_agent_priv->local_work, local_completions); refcount_set(&mad_agent_priv->refcount, 1); init_completion(&mad_agent_priv->comp); mad_agent_priv->sol_fc_send_count = 0; mad_agent_priv->sol_fc_wait_count = 0; mad_agent_priv->sol_fc_max = recv_handler ? get_sol_fc_max_outstanding(mad_reg_req) : 0; ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); if (ret2) { ret = ERR_PTR(ret2); goto error4; } /* * The mlx4 driver uses the top byte to distinguish which virtual * function generated the MAD, so we must avoid using it. 
*/ ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid, mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1), &ib_mad_client_next, GFP_KERNEL); if (ret2 < 0) { ret = ERR_PTR(ret2); goto error5; } /* * Make sure MAD registration (if supplied) * is non overlapping with any existing ones */ spin_lock_irq(&port_priv->reg_lock); if (mad_reg_req) { mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class); if (!is_vendor_class(mgmt_class)) { class = port_priv->version[mad_reg_req-> mgmt_class_version].class; if (class) { method = class->method_table[mgmt_class]; if (method) { if (method_in_use(&method, mad_reg_req)) goto error6; } } ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, mgmt_class); } else { /* "New" vendor class range */ vendor = port_priv->version[mad_reg_req-> mgmt_class_version].vendor; if (vendor) { vclass = vendor_class_index(mgmt_class); vendor_class = vendor->vendor_class[vclass]; if (vendor_class) { if (is_vendor_method_in_use( vendor_class, mad_reg_req)) goto error6; } } ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); } if (ret2) { ret = ERR_PTR(ret2); goto error6; } } spin_unlock_irq(&port_priv->reg_lock); trace_ib_mad_create_agent(mad_agent_priv); return &mad_agent_priv->agent; error6: spin_unlock_irq(&port_priv->reg_lock); xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); error5: ib_mad_agent_security_cleanup(&mad_agent_priv->agent); error4: kfree(reg_req); error3: kfree(mad_agent_priv); error1: return ret; } EXPORT_SYMBOL(ib_register_mad_agent); static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) { if (refcount_dec_and_test(&mad_agent_priv->refcount)) complete(&mad_agent_priv->comp); } static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) { struct ib_mad_port_private *port_priv; /* Note that we could still be handling received MADs */ trace_ib_mad_unregister_agent(mad_agent_priv); /* * Canceling all sends results in dropping received response * MADs, preventing us from queuing additional work */ cancel_mads(mad_agent_priv); port_priv = mad_agent_priv->qp_info->port_priv; cancel_delayed_work(&mad_agent_priv->timed_work); spin_lock_irq(&port_priv->reg_lock); remove_mad_reg_req(mad_agent_priv); spin_unlock_irq(&port_priv->reg_lock); xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); flush_workqueue(port_priv->wq); deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); ib_cancel_rmpp_recvs(mad_agent_priv); ib_mad_agent_security_cleanup(&mad_agent_priv->agent); kfree(mad_agent_priv->reg_req); kfree_rcu(mad_agent_priv, rcu); } /* * ib_unregister_mad_agent - Unregisters a client from using MAD services * * Context: Process context. 
*/ void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) { struct ib_mad_agent_private *mad_agent_priv; mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, agent); unregister_mad_agent(mad_agent_priv); } EXPORT_SYMBOL(ib_unregister_mad_agent); static void dequeue_mad(struct ib_mad_list_head *mad_list) { struct ib_mad_queue *mad_queue; unsigned long flags; mad_queue = mad_list->mad_queue; spin_lock_irqsave(&mad_queue->lock, flags); list_del(&mad_list->list); mad_queue->count--; spin_unlock_irqrestore(&mad_queue->lock, flags); } static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid, u16 pkey_index, u32 port_num, struct ib_wc *wc) { memset(wc, 0, sizeof *wc); wc->wr_cqe = cqe; wc->status = IB_WC_SUCCESS; wc->opcode = IB_WC_RECV; wc->pkey_index = pkey_index; wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); wc->src_qp = IB_QP0; wc->qp = qp; wc->slid = slid; wc->sl = 0; wc->dlid_path_bits = 0; wc->port_num = port_num; } static size_t mad_priv_size(const struct ib_mad_private *mp) { return sizeof(struct ib_mad_private) + mp->mad_size; } static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags) { size_t size = sizeof(struct ib_mad_private) + mad_size; struct ib_mad_private *ret = kzalloc(size, flags); if (ret) ret->mad_size = mad_size; return ret; } static size_t port_mad_size(const struct ib_mad_port_private *port_priv) { return rdma_max_mad_size(port_priv->device, port_priv->port_num); } static size_t mad_priv_dma_size(const struct ib_mad_private *mp) { return sizeof(struct ib_grh) + mp->mad_size; } /* * Return 0 if SMP is to be sent * Return 1 if SMP was consumed locally (whether or not solicited) * Return < 0 if error */ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr) { int ret = 0; struct ib_smp *smp = mad_send_wr->send_buf.mad; struct opa_smp *opa_smp = (struct opa_smp *)smp; unsigned long flags; struct ib_mad_local_private *local; struct ib_mad_private *mad_priv; struct ib_mad_port_private *port_priv; struct ib_mad_agent_private *recv_mad_agent = NULL; struct ib_device *device = mad_agent_priv->agent.device; u32 port_num; struct ib_wc mad_wc; struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); u16 out_mad_pkey_index = 0; u16 drslid; bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, mad_agent_priv->qp_info->port_priv->port_num); if (rdma_cap_ib_switch(device) && smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) port_num = send_wr->port_num; else port_num = mad_agent_priv->agent.port_num; /* * Directed route handling starts if the initial LID routed part of * a request or the ending LID routed part of a response is empty. * If we are at the start of the LID routed part, don't update the * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. */ if (opa && smp->class_version == OPA_SM_CLASS_VERSION) { u32 opa_drslid; trace_ib_mad_handle_out_opa_smi(opa_smp); if ((opa_get_smp_direction(opa_smp) ? 
opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == OPA_LID_PERMISSIVE && opa_smi_handle_dr_smp_send(opa_smp, rdma_cap_ib_switch(device), port_num) == IB_SMI_DISCARD) { ret = -EINVAL; dev_err(&device->dev, "OPA Invalid directed route\n"); goto out; } opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) && opa_drslid & 0xffff0000) { ret = -EINVAL; dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", opa_drslid); goto out; } drslid = (u16)(opa_drslid & 0x0000ffff); /* Check to post send on QP or process locally */ if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD && opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD) goto out; } else { trace_ib_mad_handle_out_ib_smi(smp); if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == IB_LID_PERMISSIVE && smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) == IB_SMI_DISCARD) { ret = -EINVAL; dev_err(&device->dev, "Invalid directed route\n"); goto out; } drslid = be16_to_cpu(smp->dr_slid); /* Check to post send on QP or process locally */ if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) goto out; } local = kmalloc(sizeof *local, GFP_ATOMIC); if (!local) { ret = -ENOMEM; goto out; } local->mad_priv = NULL; local->recv_mad_agent = NULL; mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC); if (!mad_priv) { ret = -ENOMEM; kfree(local); goto out; } build_smp_wc(mad_agent_priv->agent.qp, send_wr->wr.wr_cqe, drslid, send_wr->pkey_index, send_wr->port_num, &mad_wc); if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { mad_wc.byte_len = mad_send_wr->send_buf.hdr_len + mad_send_wr->send_buf.data_len + sizeof(struct ib_grh); } /* No GRH for DR SMP */ ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL, (const struct ib_mad *)smp, (struct ib_mad *)mad_priv->mad, &mad_size, &out_mad_pkey_index); switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) && mad_agent_priv->agent.recv_handler) { local->mad_priv = mad_priv; local->recv_mad_agent = mad_agent_priv; /* * Reference MAD agent until receive * side of local completion handled */ refcount_inc(&mad_agent_priv->refcount); } else kfree(mad_priv); break; case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: kfree(mad_priv); break; case IB_MAD_RESULT_SUCCESS: /* Treat like an incoming receive MAD */ port_priv = ib_get_mad_port(mad_agent_priv->agent.device, mad_agent_priv->agent.port_num); if (port_priv) { memcpy(mad_priv->mad, smp, mad_priv->mad_size); recv_mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)mad_priv->mad); } if (!port_priv || !recv_mad_agent) { /* * No receiving agent so drop packet and * generate send completion. 
*/ kfree(mad_priv); break; } local->mad_priv = mad_priv; local->recv_mad_agent = recv_mad_agent; break; default: kfree(mad_priv); kfree(local); ret = -EINVAL; goto out; } local->mad_send_wr = mad_send_wr; if (opa) { local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index; local->return_wc_byte_len = mad_size; } /* Reference MAD agent until send side of local completion handled */ refcount_inc(&mad_agent_priv->refcount); /* Queue local completion to local list */ spin_lock_irqsave(&mad_agent_priv->lock, flags); list_add_tail(&local->completion_list, &mad_agent_priv->local_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); queue_work(mad_agent_priv->qp_info->port_priv->wq, &mad_agent_priv->local_work); ret = 1; out: return ret; } static int get_pad_size(int hdr_len, int data_len, size_t mad_size) { int seg_size, pad; seg_size = mad_size - hdr_len; if (data_len && seg_size) { pad = seg_size - data_len % seg_size; return pad == seg_size ? 0 : pad; } else return seg_size; } static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_segment *s, *t; list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) { list_del(&s->list); kfree(s); } } static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, size_t mad_size, gfp_t gfp_mask) { struct ib_mad_send_buf *send_buf = &send_wr->send_buf; struct ib_rmpp_mad *rmpp_mad = send_buf->mad; struct ib_rmpp_segment *seg = NULL; int left, seg_size, pad; send_buf->seg_size = mad_size - send_buf->hdr_len; send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR; seg_size = send_buf->seg_size; pad = send_wr->pad; /* Allocate data segments. */ for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask); if (!seg) { free_send_rmpp_list(send_wr); return -ENOMEM; } seg->num = ++send_buf->seg_count; list_add_tail(&seg->list, &send_wr->rmpp_list); } /* Zero any padding */ if (pad) memset(seg->data + seg_size - pad, 0, pad); rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv-> agent.rmpp_version; rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); send_wr->cur_seg = container_of(send_wr->rmpp_list.next, struct ib_rmpp_segment, list); send_wr->last_ack_seg = send_wr->cur_seg; return 0; } int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) { return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); } EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, int rmpp_active, int hdr_len, int data_len, gfp_t gfp_mask, u8 base_version) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; int pad, message_size, ret, size; void *buf; size_t mad_size; bool opa; mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, agent); opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num); if (opa && base_version == OPA_MGMT_BASE_VERSION) mad_size = sizeof(struct opa_mad); else mad_size = sizeof(struct ib_mad); pad = get_pad_size(hdr_len, data_len, mad_size); message_size = hdr_len + data_len + pad; if (ib_mad_kernel_rmpp_agent(mad_agent)) { if (!rmpp_active && message_size > mad_size) return ERR_PTR(-EINVAL); } else if (rmpp_active || message_size > mad_size) return ERR_PTR(-EINVAL); size = rmpp_active ? 
hdr_len : mad_size; buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); if (!buf) return ERR_PTR(-ENOMEM); mad_send_wr = buf + size; INIT_LIST_HEAD(&mad_send_wr->rmpp_list); mad_send_wr->send_buf.mad = buf; mad_send_wr->send_buf.hdr_len = hdr_len; mad_send_wr->send_buf.data_len = data_len; mad_send_wr->pad = pad; mad_send_wr->mad_agent_priv = mad_agent_priv; mad_send_wr->sg_list[0].length = hdr_len; mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey; /* OPA MADs don't have to be the full 2048 bytes */ if (opa && base_version == OPA_MGMT_BASE_VERSION && data_len < mad_size - hdr_len) mad_send_wr->sg_list[1].length = data_len; else mad_send_wr->sg_list[1].length = mad_size - hdr_len; mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; mad_send_wr->mad_list.cqe.done = ib_mad_send_done; mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list; mad_send_wr->send_wr.wr.num_sge = 2; mad_send_wr->send_wr.wr.opcode = IB_WR_SEND; mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED; mad_send_wr->send_wr.remote_qpn = remote_qpn; mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY; mad_send_wr->send_wr.pkey_index = pkey_index; if (rmpp_active) { ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); if (ret) { kfree(buf); return ERR_PTR(ret); } } mad_send_wr->send_buf.mad_agent = mad_agent; refcount_inc(&mad_agent_priv->refcount); return &mad_send_wr->send_buf; } EXPORT_SYMBOL(ib_create_send_mad); int ib_get_mad_data_offset(u8 mgmt_class) { if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) return IB_MGMT_SA_HDR; else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || (mgmt_class == IB_MGMT_CLASS_BIS)) return IB_MGMT_DEVICE_HDR; else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) return IB_MGMT_VENDOR_HDR; else return IB_MGMT_MAD_HDR; } EXPORT_SYMBOL(ib_get_mad_data_offset); int ib_is_mad_class_rmpp(u8 mgmt_class) { if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || (mgmt_class == IB_MGMT_CLASS_BIS) || ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) return 1; return 0; } EXPORT_SYMBOL(ib_is_mad_class_rmpp); void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) { struct ib_mad_send_wr_private *mad_send_wr; struct list_head *list; mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); list = &mad_send_wr->cur_seg->list; if (mad_send_wr->cur_seg->num < seg_num) { list_for_each_entry(mad_send_wr->cur_seg, list, list) if (mad_send_wr->cur_seg->num == seg_num) break; } else if (mad_send_wr->cur_seg->num > seg_num) { list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list) if (mad_send_wr->cur_seg->num == seg_num) break; } return mad_send_wr->cur_seg->data; } EXPORT_SYMBOL(ib_get_rmpp_segment); static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr) { if (mad_send_wr->send_buf.seg_count) return ib_get_rmpp_segment(&mad_send_wr->send_buf, mad_send_wr->seg_num); else return mad_send_wr->send_buf.mad + mad_send_wr->send_buf.hdr_len; } void ib_free_send_mad(struct ib_mad_send_buf *send_buf) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; mad_agent_priv = container_of(send_buf->mad_agent, struct ib_mad_agent_private, agent); mad_send_wr = container_of(send_buf, struct 
ib_mad_send_wr_private, send_buf); free_send_rmpp_list(mad_send_wr); kfree(send_buf->mad); deref_mad_agent(mad_agent_priv); } EXPORT_SYMBOL(ib_free_send_mad); int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_qp_info *qp_info; struct list_head *list; struct ib_mad_agent *mad_agent; struct ib_sge *sge; unsigned long flags; int ret; /* Set WR ID to find mad_send_wr upon completion */ qp_info = mad_send_wr->mad_agent_priv->qp_info; mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; mad_send_wr->mad_list.cqe.done = ib_mad_send_done; mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; mad_agent = mad_send_wr->send_buf.mad_agent; sge = mad_send_wr->sg_list; sge[0].addr = ib_dma_map_single(mad_agent->device, mad_send_wr->send_buf.mad, sge[0].length, DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) return -ENOMEM; mad_send_wr->header_mapping = sge[0].addr; sge[1].addr = ib_dma_map_single(mad_agent->device, ib_get_payload(mad_send_wr), sge[1].length, DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { ib_dma_unmap_single(mad_agent->device, mad_send_wr->header_mapping, sge[0].length, DMA_TO_DEVICE); return -ENOMEM; } mad_send_wr->payload_mapping = sge[1].addr; spin_lock_irqsave(&qp_info->send_queue.lock, flags); if (qp_info->send_queue.count < qp_info->send_queue.max_active) { trace_ib_mad_ib_send_mad(mad_send_wr, qp_info); ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, NULL); list = &qp_info->send_queue.list; } else { ret = 0; list = &qp_info->overflow_list; } if (!ret) { qp_info->send_queue.count++; list_add_tail(&mad_send_wr->mad_list.list, list); } spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); if (ret) { ib_dma_unmap_single(mad_agent->device, mad_send_wr->header_mapping, sge[0].length, DMA_TO_DEVICE); ib_dma_unmap_single(mad_agent->device, mad_send_wr->payload_mapping, sge[1].length, DMA_TO_DEVICE); } return ret; } static void handle_queued_state(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_agent_private *mad_agent_priv) { if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP) { mad_agent_priv->sol_fc_wait_count--; list_move_tail(&mad_send_wr->agent_list, &mad_agent_priv->backlog_list); } else { expect_mad_state(mad_send_wr, IB_MAD_STATE_INIT); list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->backlog_list); } } static void handle_send_state(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_agent_private *mad_agent_priv) { if (mad_send_wr->state == IB_MAD_STATE_INIT) { list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->send_list); } else { expect_mad_state2(mad_send_wr, IB_MAD_STATE_WAIT_RESP, IB_MAD_STATE_QUEUED); list_move_tail(&mad_send_wr->agent_list, &mad_agent_priv->send_list); } if (mad_send_wr->is_solicited_fc) { if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP) mad_agent_priv->sol_fc_wait_count--; mad_agent_priv->sol_fc_send_count++; } } static void handle_wait_state(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_agent_private *mad_agent_priv) { struct ib_mad_send_wr_private *temp_mad_send_wr; struct list_head *list_item; unsigned long delay; expect_mad_state3(mad_send_wr, IB_MAD_STATE_SEND_START, IB_MAD_STATE_WAIT_RESP, IB_MAD_STATE_CANCELED); if (mad_send_wr->state == IB_MAD_STATE_SEND_START && mad_send_wr->is_solicited_fc) { mad_agent_priv->sol_fc_send_count--; mad_agent_priv->sol_fc_wait_count++; } list_del_init(&mad_send_wr->agent_list); delay = mad_send_wr->timeout; mad_send_wr->timeout += jiffies; if (delay) 
{ list_for_each_prev(list_item, &mad_agent_priv->wait_list) { temp_mad_send_wr = list_entry( list_item, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_send_wr->timeout, temp_mad_send_wr->timeout)) break; } } else { list_item = &mad_agent_priv->wait_list; } list_add(&mad_send_wr->agent_list, list_item); } static void handle_early_resp_state(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_agent_private *mad_agent_priv) { expect_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START); mad_agent_priv->sol_fc_send_count -= mad_send_wr->is_solicited_fc; } static void handle_canceled_state(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_agent_private *mad_agent_priv) { not_expect_mad_state(mad_send_wr, IB_MAD_STATE_DONE); if (mad_send_wr->is_solicited_fc) { if (mad_send_wr->state == IB_MAD_STATE_SEND_START) mad_agent_priv->sol_fc_send_count--; else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP) mad_agent_priv->sol_fc_wait_count--; } } static void handle_done_state(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_agent_private *mad_agent_priv) { if (mad_send_wr->is_solicited_fc) { if (mad_send_wr->state == IB_MAD_STATE_SEND_START) mad_agent_priv->sol_fc_send_count--; else if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP) mad_agent_priv->sol_fc_wait_count--; } list_del_init(&mad_send_wr->agent_list); } void change_mad_state(struct ib_mad_send_wr_private *mad_send_wr, enum ib_mad_state new_state) { struct ib_mad_agent_private *mad_agent_priv = mad_send_wr->mad_agent_priv; switch (new_state) { case IB_MAD_STATE_INIT: break; case IB_MAD_STATE_QUEUED: handle_queued_state(mad_send_wr, mad_agent_priv); break; case IB_MAD_STATE_SEND_START: handle_send_state(mad_send_wr, mad_agent_priv); break; case IB_MAD_STATE_WAIT_RESP: handle_wait_state(mad_send_wr, mad_agent_priv); if (mad_send_wr->state == IB_MAD_STATE_CANCELED) return; break; case IB_MAD_STATE_EARLY_RESP: handle_early_resp_state(mad_send_wr, mad_agent_priv); break; case IB_MAD_STATE_CANCELED: handle_canceled_state(mad_send_wr, mad_agent_priv); break; case IB_MAD_STATE_DONE: handle_done_state(mad_send_wr, mad_agent_priv); break; } mad_send_wr->state = new_state; } static bool is_solicited_fc_mad(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; u8 mgmt_class; if (!mad_send_wr->timeout) return 0; rmpp_mad = mad_send_wr->send_buf.mad; if (mad_send_wr->mad_agent_priv->agent.rmpp_version && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return 0; mgmt_class = ((struct ib_mad_hdr *)mad_send_wr->send_buf.mad)->mgmt_class; return mgmt_class == IB_MGMT_CLASS_CM || mgmt_class == IB_MGMT_CLASS_SUBN_ADM || mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE; } static bool mad_is_for_backlog(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_agent_private *mad_agent_priv = mad_send_wr->mad_agent_priv; if (!mad_send_wr->is_solicited_fc || !mad_agent_priv->sol_fc_max) return false; if (!list_empty(&mad_agent_priv->backlog_list)) return true; return mad_agent_priv->sol_fc_send_count + mad_agent_priv->sol_fc_wait_count >= mad_agent_priv->sol_fc_max; } /* * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated * with the registered client */ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, struct ib_mad_send_buf **bad_send_buf) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_buf *next_send_buf; struct ib_mad_send_wr_private *mad_send_wr; unsigned long flags; int ret = -EINVAL; /* Walk list 
of send WRs and post each on send list */ for (; send_buf; send_buf = next_send_buf) { mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); mad_agent_priv = mad_send_wr->mad_agent_priv; ret = ib_mad_enforce_security(mad_agent_priv, mad_send_wr->send_wr.pkey_index); if (ret) goto error; if (!send_buf->mad_agent->send_handler) { ret = -EINVAL; goto error; } if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { if (mad_agent_priv->agent.rmpp_version) { ret = -EINVAL; goto error; } } /* * Save pointer to next work request to post in case the * current one completes, and the user modifies the work * request associated with the completion */ next_send_buf = send_buf->next; mad_send_wr->send_wr.ah = send_buf->ah; if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { ret = handle_outgoing_dr_smp(mad_agent_priv, mad_send_wr); if (ret < 0) /* error */ goto error; else if (ret == 1) /* locally consumed */ continue; } mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; /* Timeout will be updated after send completes */ mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); mad_send_wr->max_retries = send_buf->retries; mad_send_wr->retries_left = send_buf->retries; send_buf->retries = 0; change_mad_state(mad_send_wr, IB_MAD_STATE_INIT); /* Reference MAD agent until send completes */ refcount_inc(&mad_agent_priv->refcount); spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr->is_solicited_fc = is_solicited_fc_mad(mad_send_wr); if (mad_is_for_backlog(mad_send_wr)) { change_mad_state(mad_send_wr, IB_MAD_STATE_QUEUED); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); return 0; } change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { ret = ib_send_rmpp_mad(mad_send_wr); if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) ret = ib_send_mad(mad_send_wr); } else ret = ib_send_mad(mad_send_wr); if (ret < 0) { /* Fail send request */ spin_lock_irqsave(&mad_agent_priv->lock, flags); change_mad_state(mad_send_wr, IB_MAD_STATE_DONE); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); deref_mad_agent(mad_agent_priv); goto error; } } return 0; error: if (bad_send_buf) *bad_send_buf = send_buf; return ret; } EXPORT_SYMBOL(ib_post_send_mad); /* * ib_free_recv_mad - Returns data buffers used to receive * a MAD to the access layer */ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *priv; struct list_head free_list; INIT_LIST_HEAD(&free_list); list_splice_init(&mad_recv_wc->rmpp_list, &free_list); list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, &free_list, list) { mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, recv_buf); mad_priv_hdr = container_of(mad_recv_wc, struct ib_mad_private_header, recv_wc); priv = container_of(mad_priv_hdr, struct ib_mad_private, header); kfree(priv); } } EXPORT_SYMBOL(ib_free_recv_mad); static int method_in_use(struct ib_mad_mgmt_method_table **method, struct ib_mad_reg_req *mad_reg_req) { int i; for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { if ((*method)->agent[i]) { pr_err("Method %d already in use\n", i); return -EINVAL; } } return 0; } static int allocate_method_table(struct ib_mad_mgmt_method_table **method) { /* Allocate management method table */ *method = kzalloc(sizeof **method, 
GFP_ATOMIC); return (*method) ? 0 : (-ENOMEM); } /* * Check to see if there are any methods still in use */ static int check_method_table(struct ib_mad_mgmt_method_table *method) { int i; for (i = 0; i < IB_MGMT_MAX_METHODS; i++) if (method->agent[i]) return 1; return 0; } /* * Check to see if there are any method tables for this class still in use */ static int check_class_table(struct ib_mad_mgmt_class_table *class) { int i; for (i = 0; i < MAX_MGMT_CLASS; i++) if (class->method_table[i]) return 1; return 0; } static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) { int i; for (i = 0; i < MAX_MGMT_OUI; i++) if (vendor_class->method_table[i]) return 1; return 0; } static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, const char *oui) { int i; for (i = 0; i < MAX_MGMT_OUI; i++) /* Is there matching OUI for this vendor class ? */ if (!memcmp(vendor_class->oui[i], oui, 3)) return i; return -1; } static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) { int i; for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) if (vendor->vendor_class[i]) return 1; return 0; } static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, struct ib_mad_agent_private *agent) { int i; /* Remove any methods for this mad agent */ for (i = 0; i < IB_MGMT_MAX_METHODS; i++) if (method->agent[i] == agent) method->agent[i] = NULL; } static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv, u8 mgmt_class) { struct ib_mad_port_private *port_priv; struct ib_mad_mgmt_class_table **class; struct ib_mad_mgmt_method_table **method; int i, ret; port_priv = agent_priv->qp_info->port_priv; class = &port_priv->version[mad_reg_req->mgmt_class_version].class; if (!*class) { /* Allocate management class table for "new" class version */ *class = kzalloc(sizeof **class, GFP_ATOMIC); if (!*class) { ret = -ENOMEM; goto error1; } /* Allocate method table for this management class */ method = &(*class)->method_table[mgmt_class]; if ((ret = allocate_method_table(method))) goto error2; } else { method = &(*class)->method_table[mgmt_class]; if (!*method) { /* Allocate method table for this management class */ if ((ret = allocate_method_table(method))) goto error1; } } /* Now, make sure methods are not already in use */ if (method_in_use(method, mad_reg_req)) goto error3; /* Finally, add in methods being registered */ for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) (*method)->agent[i] = agent_priv; return 0; error3: /* Remove any methods for this mad agent */ remove_methods_mad_agent(*method, agent_priv); /* Now, check to see if there are any methods in use */ if (!check_method_table(*method)) { /* If not, release management method table */ kfree(*method); *method = NULL; } ret = -EINVAL; goto error1; error2: kfree(*class); *class = NULL; error1: return ret; } static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, struct ib_mad_agent_private *agent_priv) { struct ib_mad_port_private *port_priv; struct ib_mad_mgmt_vendor_class_table **vendor_table; struct ib_mad_mgmt_vendor_class_table *vendor = NULL; struct ib_mad_mgmt_vendor_class *vendor_class = NULL; struct ib_mad_mgmt_method_table **method; int i, ret = -ENOMEM; u8 vclass; /* "New" vendor (with OUI) class */ vclass = vendor_class_index(mad_reg_req->mgmt_class); port_priv = agent_priv->qp_info->port_priv; vendor_table = &port_priv->version[ mad_reg_req->mgmt_class_version].vendor; if (!*vendor_table) { /* Allocate mgmt vendor class table for 
"new" class version */ vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); if (!vendor) goto error1; *vendor_table = vendor; } if (!(*vendor_table)->vendor_class[vclass]) { /* Allocate table for this management vendor class */ vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); if (!vendor_class) goto error2; (*vendor_table)->vendor_class[vclass] = vendor_class; } for (i = 0; i < MAX_MGMT_OUI; i++) { /* Is there matching OUI for this vendor class ? */ if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], mad_reg_req->oui, 3)) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; if (!*method) goto error3; goto check_in_use; } } for (i = 0; i < MAX_MGMT_OUI; i++) { /* OUI slot available ? */ if (!is_vendor_oui((*vendor_table)->vendor_class[ vclass]->oui[i])) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; /* Allocate method table for this OUI */ if (!*method) { ret = allocate_method_table(method); if (ret) goto error3; } memcpy((*vendor_table)->vendor_class[vclass]->oui[i], mad_reg_req->oui, 3); goto check_in_use; } } dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); goto error3; check_in_use: /* Now, make sure methods are not already in use */ if (method_in_use(method, mad_reg_req)) goto error4; /* Finally, add in methods being registered */ for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) (*method)->agent[i] = agent_priv; return 0; error4: /* Remove any methods for this mad agent */ remove_methods_mad_agent(*method, agent_priv); /* Now, check to see if there are any methods in use */ if (!check_method_table(*method)) { /* If not, release management method table */ kfree(*method); *method = NULL; } ret = -EINVAL; error3: if (vendor_class) { (*vendor_table)->vendor_class[vclass] = NULL; kfree(vendor_class); } error2: if (vendor) { *vendor_table = NULL; kfree(vendor); } error1: return ret; } static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) { struct ib_mad_port_private *port_priv; struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_method_table *method; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; int index; u8 mgmt_class; /* * Was MAD registration request supplied * with original registration ? */ if (!agent_priv->reg_req) goto out; port_priv = agent_priv->qp_info->port_priv; mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); class = port_priv->version[ agent_priv->reg_req->mgmt_class_version].class; if (!class) goto vendor_check; method = class->method_table[mgmt_class]; if (method) { /* Remove any methods for this mad agent */ remove_methods_mad_agent(method, agent_priv); /* Now, check to see if there are any methods still in use */ if (!check_method_table(method)) { /* If not, release management method table */ kfree(method); class->method_table[mgmt_class] = NULL; /* Any management classes left ? 
*/ if (!check_class_table(class)) { /* If not, release management class table */ kfree(class); port_priv->version[ agent_priv->reg_req-> mgmt_class_version].class = NULL; } } } vendor_check: if (!is_vendor_class(mgmt_class)) goto out; /* normalize mgmt_class to vendor range 2 */ mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); vendor = port_priv->version[ agent_priv->reg_req->mgmt_class_version].vendor; if (!vendor) goto out; vendor_class = vendor->vendor_class[mgmt_class]; if (vendor_class) { index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); if (index < 0) goto out; method = vendor_class->method_table[index]; if (method) { /* Remove any methods for this mad agent */ remove_methods_mad_agent(method, agent_priv); /* * Now, check to see if there are * any methods still in use */ if (!check_method_table(method)) { /* If not, release management method table */ kfree(method); vendor_class->method_table[index] = NULL; memset(vendor_class->oui[index], 0, 3); /* Any OUIs left ? */ if (!check_vendor_class(vendor_class)) { /* If not, release vendor class table */ kfree(vendor_class); vendor->vendor_class[mgmt_class] = NULL; /* Any other vendor classes left ? */ if (!check_vendor_table(vendor)) { kfree(vendor); port_priv->version[ agent_priv->reg_req-> mgmt_class_version]. vendor = NULL; } } } } } out: return; } static struct ib_mad_agent_private * find_mad_agent(struct ib_mad_port_private *port_priv, const struct ib_mad_hdr *mad_hdr) { struct ib_mad_agent_private *mad_agent = NULL; unsigned long flags; if (ib_response_mad(mad_hdr)) { u32 hi_tid; /* * Routing is based on high 32 bits of transaction ID * of MAD. */ hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; rcu_read_lock(); mad_agent = xa_load(&ib_mad_clients, hi_tid); if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount)) mad_agent = NULL; rcu_read_unlock(); } else { struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_method_table *method; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; const struct ib_vendor_mad *vendor_mad; int index; spin_lock_irqsave(&port_priv->reg_lock, flags); /* * Routing is based on version, class, and method * For "newer" vendor MADs, also based on OUI */ if (mad_hdr->class_version >= MAX_MGMT_VERSION) goto out; if (!is_vendor_class(mad_hdr->mgmt_class)) { class = port_priv->version[ mad_hdr->class_version].class; if (!class) goto out; if (convert_mgmt_class(mad_hdr->mgmt_class) >= ARRAY_SIZE(class->method_table)) goto out; method = class->method_table[convert_mgmt_class( mad_hdr->mgmt_class)]; if (method) mad_agent = method->agent[mad_hdr->method & ~IB_MGMT_METHOD_RESP]; } else { vendor = port_priv->version[ mad_hdr->class_version].vendor; if (!vendor) goto out; vendor_class = vendor->vendor_class[vendor_class_index( mad_hdr->mgmt_class)]; if (!vendor_class) goto out; /* Find matching OUI */ vendor_mad = (const struct ib_vendor_mad *)mad_hdr; index = find_vendor_oui(vendor_class, vendor_mad->oui); if (index == -1) goto out; method = vendor_class->method_table[index]; if (method) { mad_agent = method->agent[mad_hdr->method & ~IB_MGMT_METHOD_RESP]; } } if (mad_agent) refcount_inc(&mad_agent->refcount); out: spin_unlock_irqrestore(&port_priv->reg_lock, flags); } if (mad_agent && !mad_agent->agent.recv_handler) { dev_notice(&port_priv->device->dev, "No receive handler for client %p on port %u\n", &mad_agent->agent, port_priv->port_num); deref_mad_agent(mad_agent); mad_agent = NULL; } return mad_agent; } static int validate_mad(const 
struct ib_mad_hdr *mad_hdr, const struct ib_mad_qp_info *qp_info, bool opa) { int valid = 0; u32 qp_num = qp_info->qp->qp_num; /* Make sure MAD base version is understood */ if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { pr_err("MAD received with unsupported base version %u %s\n", mad_hdr->base_version, opa ? "(opa)" : ""); goto out; } /* Filter SMI packets sent to other than QP0 */ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { if (qp_num == 0) valid = 1; } else { /* CM attributes other than ClassPortInfo only use Send method */ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && (mad_hdr->method != IB_MGMT_METHOD_SEND)) goto out; /* Filter GSI packets sent to QP0 */ if (qp_num != 0) valid = 1; } out: return valid; } static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, const struct ib_mad_hdr *mad_hdr) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; return !mad_agent_priv->agent.rmpp_version || !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) || (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); } static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, const struct ib_mad_recv_wc *rwc) { return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == rwc->recv_buf.mad->mad_hdr.mgmt_class; } static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, const struct ib_mad_send_wr_private *wr, const struct ib_mad_recv_wc *rwc) { struct rdma_ah_attr attr; u8 send_resp, rcv_resp; union ib_gid sgid; struct ib_device *device = mad_agent_priv->agent.device; u32 port_num = mad_agent_priv->agent.port_num; u8 lmc; bool has_grh; send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); if (send_resp == rcv_resp) /* both requests, or both responses. GIDs different */ return 0; if (rdma_query_ah(wr->send_buf.ah, &attr)) /* Assume not equal, to avoid false positives. */ return 0; has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH); if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH)) /* one has GID, other does not. Assume different */ return 0; if (!send_resp && rcv_resp) { /* is request/response. */ if (!has_grh) { if (ib_get_cached_lmc(device, port_num, &lmc)) return 0; return (!lmc || !((rdma_ah_get_path_bits(&attr) ^ rwc->wc->dlid_path_bits) & ((1 << lmc) - 1))); } else { const struct ib_global_route *grh = rdma_ah_read_grh(&attr); if (rdma_query_gid(device, port_num, grh->sgid_index, &sgid)) return 0; return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, 16); } } if (!has_grh) return rdma_ah_get_dlid(&attr) == rwc->wc->slid; else return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw, rwc->recv_buf.grh->sgid.raw, 16); } static inline int is_direct(u8 class) { return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); } struct ib_mad_send_wr_private* ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, const struct ib_mad_recv_wc *wc) { struct ib_mad_send_wr_private *wr; const struct ib_mad_hdr *mad_hdr; mad_hdr = &wc->recv_buf.mad->mad_hdr; list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { if ((wr->tid == mad_hdr->tid) && rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs. 
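 * (A permissive LID does not identify a specific port, so LID/GID
 * comparison is not meaningful for directed route SMPs.)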
*/ (is_direct(mad_hdr->mgmt_class) || rcv_has_same_gid(mad_agent_priv, wr, wc))) return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL; } list_for_each_entry(wr, &mad_agent_priv->backlog_list, agent_list) { if ((wr->tid == mad_hdr->tid) && rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs. */ (is_direct(mad_hdr->mgmt_class) || rcv_has_same_gid(mad_agent_priv, wr, wc))) return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL; } /* * It's possible to receive the response before we've * been notified that the send has completed */ list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && wr->tid == mad_hdr->tid && wr->timeout && rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs. */ (is_direct(mad_hdr->mgmt_class) || rcv_has_same_gid(mad_agent_priv, wr, wc))) /* Verify request has not been canceled */ return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL; } return NULL; } static void process_backlog_mads(struct ib_mad_agent_private *mad_agent_priv) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc = {}; unsigned long flags; int ret; spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->backlog_list) && (mad_agent_priv->sol_fc_send_count + mad_agent_priv->sol_fc_wait_count < mad_agent_priv->sol_fc_max)) { mad_send_wr = list_entry(mad_agent_priv->backlog_list.next, struct ib_mad_send_wr_private, agent_list); change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); ret = ib_send_mad(mad_send_wr); if (ret) { spin_lock_irqsave(&mad_agent_priv->lock, flags); deref_mad_agent(mad_agent_priv); change_mad_state(mad_send_wr, IB_MAD_STATE_DONE); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); mad_send_wc.send_buf = &mad_send_wr->send_buf; mad_send_wc.status = IB_WC_LOC_QP_OP_ERR; mad_agent_priv->agent.send_handler( &mad_agent_priv->agent, &mad_send_wc); } spin_lock_irqsave(&mad_agent_priv->lock, flags); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) { mad_send_wr->timeout = 0; if (mad_send_wr->state == IB_MAD_STATE_WAIT_RESP || mad_send_wr->state == IB_MAD_STATE_QUEUED) change_mad_state(mad_send_wr, IB_MAD_STATE_DONE); else change_mad_state(mad_send_wr, IB_MAD_STATE_EARLY_RESP); } static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; bool is_mad_done; int ret; INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); ret = ib_mad_enforce_security(mad_agent_priv, mad_recv_wc->wc->pkey_index); if (ret) { ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); return; } list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, mad_recv_wc); if (!mad_recv_wc) { deref_mad_agent(mad_agent_priv); return; } } /* Complete corresponding request */ if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) { spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) && 
ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) { /* user rmpp is in effect * and this is an active RMPP MAD */ mad_agent_priv->agent.recv_handler( &mad_agent_priv->agent, NULL, mad_recv_wc); deref_mad_agent(mad_agent_priv); } else { /* not user rmpp, revert to normal behavior and * drop the mad */ ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); return; } } else { ib_mark_mad_done(mad_send_wr); is_mad_done = (mad_send_wr->state == IB_MAD_STATE_DONE); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); /* Defined behavior is to complete response before request */ mad_agent_priv->agent.recv_handler( &mad_agent_priv->agent, &mad_send_wr->send_buf, mad_recv_wc); deref_mad_agent(mad_agent_priv); if (is_mad_done) { mad_send_wc.status = IB_WC_SUCCESS; mad_send_wc.vendor_err = 0; mad_send_wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); } } } else { mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, mad_recv_wc); deref_mad_agent(mad_agent_priv); } } static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, const struct ib_mad_qp_info *qp_info, const struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response) { enum smi_forward_action retsmi; struct ib_smp *smp = (struct ib_smp *)recv->mad; trace_ib_mad_handle_ib_smi(smp); if (smi_handle_dr_smp_recv(smp, rdma_cap_ib_switch(port_priv->device), port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) return IB_SMI_DISCARD; retsmi = smi_check_forward_dr_smp(smp); if (retsmi == IB_SMI_LOCAL) return IB_SMI_HANDLE; if (retsmi == IB_SMI_SEND) { /* don't forward */ if (smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(port_priv->device), port_num) == IB_SMI_DISCARD) return IB_SMI_DISCARD; if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) return IB_SMI_DISCARD; } else if (rdma_cap_ib_switch(port_priv->device)) { /* forward case for switches */ memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; response->header.recv_wc.recv_buf.grh = &response->grh; agent_send_response((const struct ib_mad_hdr *)response->mad, &response->grh, wc, port_priv->device, smi_get_fwd_port(smp), qp_info->qp->qp_num, response->mad_size, false); return IB_SMI_DISCARD; } return IB_SMI_HANDLE; } static bool generate_unmatched_resp(const struct ib_mad_private *recv, struct ib_mad_private *response, size_t *resp_len, bool opa) { const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; if (recv_hdr->method == IB_MGMT_METHOD_GET || recv_hdr->method == IB_MGMT_METHOD_SET) { memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; response->header.recv_wc.recv_buf.grh = &response->grh; resp_hdr->method = IB_MGMT_METHOD_GET_RESP; resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) resp_hdr->status |= IB_SMP_DIRECTION; if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) *resp_len = 
opa_get_smp_header_size( (struct opa_smp *)recv->mad); else *resp_len = sizeof(struct ib_mad_hdr); } return true; } else { return false; } } static enum smi_action handle_opa_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response) { enum smi_forward_action retsmi; struct opa_smp *smp = (struct opa_smp *)recv->mad; trace_ib_mad_handle_opa_smi(smp); if (opa_smi_handle_dr_smp_recv(smp, rdma_cap_ib_switch(port_priv->device), port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) return IB_SMI_DISCARD; retsmi = opa_smi_check_forward_dr_smp(smp); if (retsmi == IB_SMI_LOCAL) return IB_SMI_HANDLE; if (retsmi == IB_SMI_SEND) { /* don't forward */ if (opa_smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(port_priv->device), port_num) == IB_SMI_DISCARD) return IB_SMI_DISCARD; if (opa_smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) return IB_SMI_DISCARD; } else if (rdma_cap_ib_switch(port_priv->device)) { /* forward case for switches */ memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; response->header.recv_wc.recv_buf.opa_mad = (struct opa_mad *)response->mad; response->header.recv_wc.recv_buf.grh = &response->grh; agent_send_response((const struct ib_mad_hdr *)response->mad, &response->grh, wc, port_priv->device, opa_smi_get_fwd_port(smp), qp_info->qp->qp_num, recv->header.wc.byte_len, true); return IB_SMI_DISCARD; } return IB_SMI_HANDLE; } static enum smi_action handle_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response, bool opa) { struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && mad_hdr->class_version == OPA_SM_CLASS_VERSION) return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, response); return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); } static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct ib_mad_port_private *port_priv = cq->cq_context; struct ib_mad_list_head *mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); struct ib_mad_qp_info *qp_info; struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *recv, *response = NULL; struct ib_mad_agent_private *mad_agent; u32 port_num; int ret = IB_MAD_RESULT_SUCCESS; size_t mad_size; u16 resp_mad_pkey_index = 0; bool opa; if (list_empty_careful(&port_priv->port_list)) return; if (wc->status != IB_WC_SUCCESS) { /* * Receive errors indicate that the QP has entered the error * state - error handling/shutdown code will cleanup */ return; } qp_info = mad_list->mad_queue->qp_info; dequeue_mad(mad_list); opa = rdma_cap_opa_mad(qp_info->port_priv->device, qp_info->port_priv->port_num); mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); recv = container_of(mad_priv_hdr, struct ib_mad_private, header); ib_dma_unmap_single(port_priv->device, recv->header.mapping, mad_priv_dma_size(recv), DMA_FROM_DEVICE); /* Setup MAD receive work completion from "normal" work completion */ recv->header.wc = *wc; recv->header.recv_wc.wc = &recv->header.wc; if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); } else { recv->header.recv_wc.mad_len = 
sizeof(struct ib_mad); recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); } recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; recv->header.recv_wc.recv_buf.grh = &recv->grh; /* Validate MAD */ if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) goto out; trace_ib_mad_recv_done_handler(qp_info, wc, (struct ib_mad_hdr *)recv->mad); mad_size = recv->mad_size; response = alloc_mad_private(mad_size, GFP_KERNEL); if (!response) goto out; if (rdma_cap_ib_switch(port_priv->device)) port_num = wc->port_num; else port_num = port_priv->port_num; if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { if (handle_smi(port_priv, qp_info, wc, port_num, recv, response, opa) == IB_SMI_DISCARD) goto out; } /* Give driver "right of first refusal" on incoming MAD */ if (port_priv->device->ops.process_mad) { ret = port_priv->device->ops.process_mad( port_priv->device, 0, port_priv->port_num, wc, &recv->grh, (const struct ib_mad *)recv->mad, (struct ib_mad *)response->mad, &mad_size, &resp_mad_pkey_index); if (opa) wc->pkey_index = resp_mad_pkey_index; if (ret & IB_MAD_RESULT_SUCCESS) { if (ret & IB_MAD_RESULT_CONSUMED) goto out; if (ret & IB_MAD_RESULT_REPLY) { agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, port_priv->device, port_num, qp_info->qp->qp_num, mad_size, opa); goto out; } } } mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); if (mad_agent) { trace_ib_mad_recv_done_agent(mad_agent); ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); /* * recv is freed up in error cases in ib_mad_complete_recv * or via recv_handler in ib_mad_complete_recv() */ recv = NULL; } else if ((ret & IB_MAD_RESULT_SUCCESS) && generate_unmatched_resp(recv, response, &mad_size, opa)) { agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, port_priv->device, port_num, qp_info->qp->qp_num, mad_size, opa); } out: /* Post another receive request for this QP */ if (response) { ib_mad_post_receive_mads(qp_info, response); kfree(recv); } else ib_mad_post_receive_mads(qp_info, recv); } static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) { struct ib_mad_send_wr_private *mad_send_wr; unsigned long delay; if (list_empty(&mad_agent_priv->wait_list)) { cancel_delayed_work(&mad_agent_priv->timed_work); } else { mad_send_wr = list_entry(mad_agent_priv->wait_list.next, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_agent_priv->timeout, mad_send_wr->timeout)) { mad_agent_priv->timeout = mad_send_wr->timeout; delay = mad_send_wr->timeout - jiffies; if ((long)delay <= 0) delay = 1; mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, &mad_agent_priv->timed_work, delay); } } } static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_agent_private *mad_agent_priv; unsigned long delay; mad_agent_priv = mad_send_wr->mad_agent_priv; delay = mad_send_wr->timeout; change_mad_state(mad_send_wr, IB_MAD_STATE_WAIT_RESP); /* Reschedule a work item if we have a shorter timeout */ if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, &mad_agent_priv->timed_work, delay); } void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, unsigned long timeout_ms) { mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); wait_for_response(mad_send_wr); } /* * Process a send work completion */ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 
struct ib_mad_send_wc *mad_send_wc) { struct ib_mad_agent_private *mad_agent_priv; unsigned long flags; int ret; mad_agent_priv = mad_send_wr->mad_agent_priv; spin_lock_irqsave(&mad_agent_priv->lock, flags); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); if (ret == IB_RMPP_RESULT_CONSUMED) goto done; } else ret = IB_RMPP_RESULT_UNHANDLED; if (mad_send_wr->state == IB_MAD_STATE_CANCELED) mad_send_wc->status = IB_WC_WR_FLUSH_ERR; else if (mad_send_wr->state == IB_MAD_STATE_SEND_START && mad_send_wr->timeout) { wait_for_response(mad_send_wr); goto done; } /* Remove send from MAD agent and notify client of completion */ if (mad_send_wr->state != IB_MAD_STATE_DONE) change_mad_state(mad_send_wr, IB_MAD_STATE_DONE); adjust_timeout(mad_agent_priv); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (ret == IB_RMPP_RESULT_INTERNAL) { ib_rmpp_send_handler(mad_send_wc); } else { if (mad_send_wr->is_solicited_fc) process_backlog_mads(mad_agent_priv); mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, mad_send_wc); } /* Release reference on agent taken when sending */ deref_mad_agent(mad_agent_priv); return; done: spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) { struct ib_mad_port_private *port_priv = cq->cq_context; struct ib_mad_list_head *mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; struct ib_mad_qp_info *qp_info; struct ib_mad_queue *send_queue; struct ib_mad_send_wc mad_send_wc; unsigned long flags; int ret; if (list_empty_careful(&port_priv->port_list)) return; if (wc->status != IB_WC_SUCCESS) { if (!ib_mad_send_error(port_priv, wc)) return; } mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); send_queue = mad_list->mad_queue; qp_info = send_queue->qp_info; trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv); trace_ib_mad_send_done_handler(mad_send_wr, wc); retry: ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, mad_send_wr->header_mapping, mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, mad_send_wr->payload_mapping, mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); queued_send_wr = NULL; spin_lock_irqsave(&send_queue->lock, flags); list_del(&mad_list->list); /* Move queued send to the send queue */ if (send_queue->count-- > send_queue->max_active) { mad_list = container_of(qp_info->overflow_list.next, struct ib_mad_list_head, list); queued_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); list_move_tail(&mad_list->list, &send_queue->list); } spin_unlock_irqrestore(&send_queue->lock, flags); mad_send_wc.send_buf = &mad_send_wr->send_buf; mad_send_wc.status = wc->status; mad_send_wc.vendor_err = wc->vendor_err; ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); if (queued_send_wr) { trace_ib_mad_send_done_resend(queued_send_wr, qp_info); ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, NULL); if (ret) { dev_err(&port_priv->device->dev, "ib_post_send failed: %d\n", ret); mad_send_wr = queued_send_wr; wc->status = IB_WC_LOC_QP_OP_ERR; goto retry; } } } static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_list_head *mad_list; unsigned long flags; spin_lock_irqsave(&qp_info->send_queue.lock, flags); list_for_each_entry(mad_list, &qp_info->send_queue.list, 
list) { mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); mad_send_wr->retry = 1; } spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); } static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, struct ib_wc *wc) { struct ib_mad_list_head *mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; struct ib_mad_send_wr_private *mad_send_wr; int ret; /* * Send errors will transition the QP to SQE - move * QP to RTS and repost flushed work requests */ mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, mad_list); if (wc->status == IB_WC_WR_FLUSH_ERR) { if (mad_send_wr->retry) { /* Repost send */ mad_send_wr->retry = 0; trace_ib_mad_error_handler(mad_send_wr, qp_info); ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, NULL); if (!ret) return false; } } else { struct ib_qp_attr *attr; /* Transition QP to RTS and fail offending send */ attr = kmalloc(sizeof *attr, GFP_KERNEL); if (attr) { attr->qp_state = IB_QPS_RTS; attr->cur_qp_state = IB_QPS_SQE; ret = ib_modify_qp(qp_info->qp, attr, IB_QP_STATE | IB_QP_CUR_STATE); kfree(attr); if (ret) dev_err(&port_priv->device->dev, "%s - ib_modify_qp to RTS: %d\n", __func__, ret); else mark_sends_for_retry(qp_info); } } return true; } static void clear_mad_error_list(struct list_head *list, enum ib_wc_status wc_status, struct ib_mad_agent_private *mad_agent_priv) { struct ib_mad_send_wr_private *mad_send_wr, *n; struct ib_mad_send_wc mad_send_wc; mad_send_wc.status = wc_status; mad_send_wc.vendor_err = 0; list_for_each_entry_safe(mad_send_wr, n, list, agent_list) { mad_send_wc.send_buf = &mad_send_wr->send_buf; mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); deref_mad_agent(mad_agent_priv); } } static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) { unsigned long flags; struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; struct list_head cancel_list; INIT_LIST_HEAD(&cancel_list); spin_lock_irqsave(&mad_agent_priv->lock, flags); list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, &mad_agent_priv->send_list, agent_list) change_mad_state(mad_send_wr, IB_MAD_STATE_CANCELED); /* Empty wait & backlog list to prevent receives from finding request */ list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, &mad_agent_priv->wait_list, agent_list) { change_mad_state(mad_send_wr, IB_MAD_STATE_DONE); list_add_tail(&mad_send_wr->agent_list, &cancel_list); } list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, &mad_agent_priv->backlog_list, agent_list) { change_mad_state(mad_send_wr, IB_MAD_STATE_DONE); list_add_tail(&mad_send_wr->agent_list, &cancel_list); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); /* Report all cancelled requests */ clear_mad_error_list(&cancel_list, IB_WC_WR_FLUSH_ERR, mad_agent_priv); } static struct ib_mad_send_wr_private* find_send_wr(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_buf *send_buf) { struct ib_mad_send_wr_private *mad_send_wr; list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, agent_list) { if (&mad_send_wr->send_buf == send_buf) return mad_send_wr; } list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, agent_list) { if (is_rmpp_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && &mad_send_wr->send_buf == send_buf) return mad_send_wr; } list_for_each_entry(mad_send_wr, &mad_agent_priv->backlog_list, agent_list) { if (&mad_send_wr->send_buf == send_buf) return mad_send_wr; } return 
NULL; } int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; unsigned long flags; int active; if (!send_buf) return -EINVAL; mad_agent_priv = container_of(send_buf->mad_agent, struct ib_mad_agent_private, agent); spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = find_send_wr(mad_agent_priv, send_buf); if (!mad_send_wr || mad_send_wr->state == IB_MAD_STATE_CANCELED) { spin_unlock_irqrestore(&mad_agent_priv->lock, flags); return -EINVAL; } active = ((mad_send_wr->state == IB_MAD_STATE_SEND_START) || (mad_send_wr->state == IB_MAD_STATE_EARLY_RESP) || (mad_send_wr->state == IB_MAD_STATE_QUEUED && timeout_ms)); if (!timeout_ms) change_mad_state(mad_send_wr, IB_MAD_STATE_CANCELED); mad_send_wr->send_buf.timeout_ms = timeout_ms; if (active) mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); else ib_reset_mad_timeout(mad_send_wr, timeout_ms); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); return 0; } EXPORT_SYMBOL(ib_modify_mad); static void local_completions(struct work_struct *work) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_local_private *local; struct ib_mad_agent_private *recv_mad_agent; unsigned long flags; int free_mad; struct ib_wc wc; struct ib_mad_send_wc mad_send_wc; bool opa; mad_agent_priv = container_of(work, struct ib_mad_agent_private, local_work); opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, mad_agent_priv->qp_info->port_priv->port_num); spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->local_list)) { local = list_entry(mad_agent_priv->local_list.next, struct ib_mad_local_private, completion_list); list_del(&local->completion_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); free_mad = 0; if (local->mad_priv) { u8 base_version; recv_mad_agent = local->recv_mad_agent; if (!recv_mad_agent) { dev_err(&mad_agent_priv->agent.device->dev, "No receive MAD agent for local completion\n"); free_mad = 1; goto local_send_completion; } /* * Defined behavior is to complete response * before request */ build_smp_wc(recv_mad_agent->agent.qp, local->mad_send_wr->send_wr.wr.wr_cqe, be16_to_cpu(IB_LID_PERMISSIVE), local->mad_send_wr->send_wr.pkey_index, recv_mad_agent->agent.port_num, &wc); local->mad_priv->header.recv_wc.wc = &wc; base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; if (opa && base_version == OPA_MGMT_BASE_VERSION) { local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); } else { local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); } INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); list_add(&local->mad_priv->header.recv_wc.recv_buf.list, &local->mad_priv->header.recv_wc.rmpp_list); local->mad_priv->header.recv_wc.recv_buf.grh = NULL; local->mad_priv->header.recv_wc.recv_buf.mad = (struct ib_mad *)local->mad_priv->mad; recv_mad_agent->agent.recv_handler( &recv_mad_agent->agent, &local->mad_send_wr->send_buf, &local->mad_priv->header.recv_wc); spin_lock_irqsave(&recv_mad_agent->lock, flags); deref_mad_agent(recv_mad_agent); spin_unlock_irqrestore(&recv_mad_agent->lock, flags); } local_send_completion: /* Complete send */ mad_send_wc.status = IB_WC_SUCCESS; mad_send_wc.vendor_err = 0; mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 
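		/*
		 * Notify the client outside of mad_agent_priv->lock; the lock
		 * is re-taken below before dropping the agent reference and
		 * moving on to the next queued local completion.
		 */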
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); spin_lock_irqsave(&mad_agent_priv->lock, flags); deref_mad_agent(mad_agent_priv); if (free_mad) kfree(local->mad_priv); kfree(local); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) { int ret; if (!mad_send_wr->retries_left) return -ETIMEDOUT; mad_send_wr->retries_left--; mad_send_wr->send_buf.retries++; mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); if (mad_send_wr->is_solicited_fc && !list_empty(&mad_send_wr->mad_agent_priv->backlog_list)) { change_mad_state(mad_send_wr, IB_MAD_STATE_QUEUED); return 0; } if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { ret = ib_retry_rmpp(mad_send_wr); switch (ret) { case IB_RMPP_RESULT_UNHANDLED: ret = ib_send_mad(mad_send_wr); break; case IB_RMPP_RESULT_CONSUMED: ret = 0; break; default: ret = -ECOMM; break; } } else ret = ib_send_mad(mad_send_wr); if (!ret) change_mad_state(mad_send_wr, IB_MAD_STATE_SEND_START); return ret; } static void timeout_sends(struct work_struct *work) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_agent_private *mad_agent_priv; struct list_head timeout_list; struct list_head cancel_list; struct list_head *list_item; unsigned long flags, delay; mad_agent_priv = container_of(work, struct ib_mad_agent_private, timed_work.work); INIT_LIST_HEAD(&timeout_list); INIT_LIST_HEAD(&cancel_list); spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->wait_list)) { mad_send_wr = list_entry(mad_agent_priv->wait_list.next, struct ib_mad_send_wr_private, agent_list); if (time_after(mad_send_wr->timeout, jiffies)) { delay = mad_send_wr->timeout - jiffies; if ((long)delay <= 0) delay = 1; queue_delayed_work(mad_agent_priv->qp_info-> port_priv->wq, &mad_agent_priv->timed_work, delay); break; } if (mad_send_wr->state == IB_MAD_STATE_CANCELED) list_item = &cancel_list; else if (retry_send(mad_send_wr)) list_item = &timeout_list; else continue; change_mad_state(mad_send_wr, IB_MAD_STATE_DONE); list_add_tail(&mad_send_wr->agent_list, list_item); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); process_backlog_mads(mad_agent_priv); clear_mad_error_list(&timeout_list, IB_WC_RESP_TIMEOUT_ERR, mad_agent_priv); clear_mad_error_list(&cancel_list, IB_WC_WR_FLUSH_ERR, mad_agent_priv); } /* * Allocate receive MADs and post receive WRs for them */ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad) { unsigned long flags; struct ib_mad_private *mad_priv; struct ib_sge sg_list; struct ib_recv_wr recv_wr; struct ib_mad_queue *recv_queue = &qp_info->recv_queue; int ret = 0; /* Initialize common scatter list fields */ sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; /* Initialize common receive WR fields */ recv_wr.next = NULL; recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1; while (true) { /* Allocate and map receive buffer */ if (mad) { mad_priv = mad; mad = NULL; } else { mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), GFP_ATOMIC); if (!mad_priv) return -ENOMEM; } sg_list.length = mad_priv_dma_size(mad_priv); sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, &mad_priv->grh, mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { ret = -ENOMEM; goto free_mad_priv; } mad_priv->header.mapping = sg_list.addr; mad_priv->header.mad_list.mad_queue = recv_queue; 
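		/*
		 * Completions for this buffer are dispatched through the
		 * embedded ib_cqe: ib_mad_recv_done() recovers the
		 * ib_mad_private from the work completion via container_of().
		 */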
mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; spin_lock_irqsave(&recv_queue->lock, flags); if (recv_queue->count >= recv_queue->max_active) { /* Fully populated the receive queue */ spin_unlock_irqrestore(&recv_queue->lock, flags); break; } recv_queue->count++; list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); spin_unlock_irqrestore(&recv_queue->lock, flags); ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); if (ret) { spin_lock_irqsave(&recv_queue->lock, flags); list_del(&mad_priv->header.mad_list.list); recv_queue->count--; spin_unlock_irqrestore(&recv_queue->lock, flags); dev_err(&qp_info->port_priv->device->dev, "ib_post_recv failed: %d\n", ret); break; } } ib_dma_unmap_single(qp_info->port_priv->device, mad_priv->header.mapping, mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); free_mad_priv: kfree(mad_priv); return ret; } /* * Return all the posted receive MADs */ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) { struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *recv; struct ib_mad_list_head *mad_list; if (!qp_info->qp) return; while (!list_empty(&qp_info->recv_queue.list)) { mad_list = list_entry(qp_info->recv_queue.list.next, struct ib_mad_list_head, list); mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); recv = container_of(mad_priv_hdr, struct ib_mad_private, header); /* Remove from posted receive MAD list */ list_del(&mad_list->list); ib_dma_unmap_single(qp_info->port_priv->device, recv->header.mapping, mad_priv_dma_size(recv), DMA_FROM_DEVICE); kfree(recv); } qp_info->recv_queue.count = 0; } /* * Start the port */ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) { int ret, i; struct ib_qp_attr *attr; struct ib_qp *qp; u16 pkey_index; attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) return -ENOMEM; ret = ib_find_pkey(port_priv->device, port_priv->port_num, IB_DEFAULT_PKEY_FULL, &pkey_index); if (ret) pkey_index = 0; for (i = 0; i < IB_MAD_QPS_CORE; i++) { qp = port_priv->qp_info[i].qp; if (!qp) continue; /* * PKey index for QP1 is irrelevant but * one is needed for the Reset to Init transition */ attr->qp_state = IB_QPS_INIT; attr->pkey_index = pkey_index; attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY); if (ret) { dev_err(&port_priv->device->dev, "Couldn't change QP%d state to INIT: %d\n", i, ret); goto out; } attr->qp_state = IB_QPS_RTR; ret = ib_modify_qp(qp, attr, IB_QP_STATE); if (ret) { dev_err(&port_priv->device->dev, "Couldn't change QP%d state to RTR: %d\n", i, ret); goto out; } attr->qp_state = IB_QPS_RTS; attr->sq_psn = IB_MAD_SEND_Q_PSN; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); if (ret) { dev_err(&port_priv->device->dev, "Couldn't change QP%d state to RTS: %d\n", i, ret); goto out; } } ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); if (ret) { dev_err(&port_priv->device->dev, "Failed to request completion notification: %d\n", ret); goto out; } for (i = 0; i < IB_MAD_QPS_CORE; i++) { if (!port_priv->qp_info[i].qp) continue; ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); if (ret) { dev_err(&port_priv->device->dev, "Couldn't post receive WRs\n"); goto out; } } out: kfree(attr); return ret; } static void qp_event_handler(struct ib_event *event, void *qp_context) { struct ib_mad_qp_info *qp_info = qp_context; /* It's worse than that! He's dead, Jim! 
*/ dev_err(&qp_info->port_priv->device->dev, "Fatal error (%d) on MAD QP (%u)\n", event->event, qp_info->qp->qp_num); } static void init_mad_queue(struct ib_mad_qp_info *qp_info, struct ib_mad_queue *mad_queue) { mad_queue->qp_info = qp_info; mad_queue->count = 0; spin_lock_init(&mad_queue->lock); INIT_LIST_HEAD(&mad_queue->list); } static void init_mad_qp(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info) { qp_info->port_priv = port_priv; init_mad_queue(qp_info, &qp_info->send_queue); init_mad_queue(qp_info, &qp_info->recv_queue); INIT_LIST_HEAD(&qp_info->overflow_list); } static int create_mad_qp(struct ib_mad_qp_info *qp_info, enum ib_qp_type qp_type) { struct ib_qp_init_attr qp_init_attr; int ret; memset(&qp_init_attr, 0, sizeof qp_init_attr); qp_init_attr.send_cq = qp_info->port_priv->cq; qp_init_attr.recv_cq = qp_info->port_priv->cq; qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; qp_init_attr.cap.max_send_wr = mad_sendq_size; qp_init_attr.cap.max_recv_wr = mad_recvq_size; qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; qp_init_attr.qp_type = qp_type; qp_init_attr.port_num = qp_info->port_priv->port_num; qp_init_attr.qp_context = qp_info; qp_init_attr.event_handler = qp_event_handler; qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); if (IS_ERR(qp_info->qp)) { dev_err(&qp_info->port_priv->device->dev, "Couldn't create ib_mad QP%d\n", get_spl_qp_index(qp_type)); ret = PTR_ERR(qp_info->qp); goto error; } /* Use minimum queue sizes unless the CQ is resized */ qp_info->send_queue.max_active = mad_sendq_size; qp_info->recv_queue.max_active = mad_recvq_size; return 0; error: return ret; } static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) { if (!qp_info->qp) return; ib_destroy_qp(qp_info->qp); } /* * Open the port * Create the QP, PD, MR, and CQ if needed */ static int ib_mad_port_open(struct ib_device *device, u32 port_num) { int ret, cq_size; struct ib_mad_port_private *port_priv; unsigned long flags; int has_smi; if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) return -EFAULT; if (WARN_ON(rdma_cap_opa_mad(device, port_num) && rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) return -EFAULT; /* Create new device info */ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) return -ENOMEM; port_priv->device = device; port_priv->port_num = port_num; spin_lock_init(&port_priv->reg_lock); init_mad_qp(port_priv, &port_priv->qp_info[0]); init_mad_qp(port_priv, &port_priv->qp_info[1]); cq_size = mad_sendq_size + mad_recvq_size; has_smi = rdma_cap_ib_smi(device, port_num); if (has_smi) cq_size *= 2; port_priv->pd = ib_alloc_pd(device, 0); if (IS_ERR(port_priv->pd)) { dev_err(&device->dev, "Couldn't create ib_mad PD\n"); ret = PTR_ERR(port_priv->pd); goto error3; } port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, IB_POLL_UNBOUND_WORKQUEUE); if (IS_ERR(port_priv->cq)) { dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); goto error4; } if (has_smi) { ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); if (ret) goto error6; } if (rdma_cap_ib_cm(device, port_num)) { ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); if (ret) goto error7; } port_priv->wq = alloc_ordered_workqueue("ib_mad%u", WQ_MEM_RECLAIM, port_num); if (!port_priv->wq) { ret = -ENOMEM; goto error8; } spin_lock_irqsave(&ib_mad_port_list_lock, flags); list_add_tail(&port_priv->port_list, &ib_mad_port_list); 
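	/*
	 * The port is now discoverable through __ib_get_mad_port();
	 * ib_mad_port_start() below transitions its QPs to RTS.
	 */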
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); ret = ib_mad_port_start(port_priv); if (ret) { dev_err(&device->dev, "Couldn't start port\n"); goto error9; } return 0; error9: spin_lock_irqsave(&ib_mad_port_list_lock, flags); list_del_init(&port_priv->port_list); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); destroy_workqueue(port_priv->wq); error8: destroy_mad_qp(&port_priv->qp_info[1]); error7: destroy_mad_qp(&port_priv->qp_info[0]); error6: ib_free_cq(port_priv->cq); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); error4: ib_dealloc_pd(port_priv->pd); error3: kfree(port_priv); return ret; } /* * Close the port * If there are no classes using the port, free the port * resources (CQ, MR, PD, QP) and remove the port's info structure */ static int ib_mad_port_close(struct ib_device *device, u32 port_num) { struct ib_mad_port_private *port_priv; unsigned long flags; spin_lock_irqsave(&ib_mad_port_list_lock, flags); port_priv = __ib_get_mad_port(device, port_num); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); dev_err(&device->dev, "Port %u not found\n", port_num); return -ENODEV; } list_del_init(&port_priv->port_list); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); destroy_workqueue(port_priv->wq); destroy_mad_qp(&port_priv->qp_info[1]); destroy_mad_qp(&port_priv->qp_info[0]); ib_free_cq(port_priv->cq); ib_dealloc_pd(port_priv->pd); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); /* XXX: Handle deallocation of MAD registration tables */ kfree(port_priv); return 0; } static int ib_mad_init_device(struct ib_device *device) { int start, i; unsigned int count = 0; int ret; start = rdma_start_port(device); for (i = start; i <= rdma_end_port(device); i++) { if (!rdma_cap_ib_mad(device, i)) continue; ret = ib_mad_port_open(device, i); if (ret) { dev_err(&device->dev, "Couldn't open port %d\n", i); goto error; } ret = ib_agent_port_open(device, i); if (ret) { dev_err(&device->dev, "Couldn't open port %d for agents\n", i); goto error_agent; } count++; } if (!count) return -EOPNOTSUPP; return 0; error_agent: if (ib_mad_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d\n", i); error: while (--i >= start) { if (!rdma_cap_ib_mad(device, i)) continue; if (ib_agent_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d for agents\n", i); if (ib_mad_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d\n", i); } return ret; } static void ib_mad_remove_device(struct ib_device *device, void *client_data) { unsigned int i; rdma_for_each_port (device, i) { if (!rdma_cap_ib_mad(device, i)) continue; if (ib_agent_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %u for agents\n", i); if (ib_mad_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %u\n", i); } } static struct ib_client mad_client = { .name = "mad", .add = ib_mad_init_device, .remove = ib_mad_remove_device }; int ib_mad_init(void) { mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); INIT_LIST_HEAD(&ib_mad_port_list); if (ib_register_client(&mad_client)) { pr_err("Couldn't register ib_mad client\n"); return -EINVAL; } return 0; } void ib_mad_cleanup(void) { ib_unregister_client(&mad_client); } |
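
/*
 * Illustrative sketch (not part of mad.c): how a kernel client might drive
 * the send path implemented above.  It assumes the public ib_mad.h client
 * API (ib_register_mad_agent(), ib_create_send_mad(), ib_post_send_mad(),
 * ib_modify_mad()); the example_* names, the remote QPN, and the timeout,
 * retry and buffer-size values are placeholders, not taken from this file.
 */
#if 0	/* example only, never built */
#include <rdma/ib_mad.h>

static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *send_wc)
{
	/* Called once per posted send: success, timeout or cancellation. */
	ib_free_send_mad(send_wc->send_buf);
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_buf *send_buf,
				 struct ib_mad_recv_wc *recv_wc)
{
	/* send_buf is non-NULL when the receive matched one of our sends. */
	ib_free_recv_mad(recv_wc);
}

static int example_send_request(struct ib_device *device, u32 port_num,
				struct ib_ah *ah)
{
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *msg;
	int ret;

	/*
	 * No ib_mad_reg_req: this agent only sends requests and receives
	 * the responses that ib_find_send_mad() matches by TID.
	 */
	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, NULL, 0,
				      example_send_handler,
				      example_recv_handler, NULL, 0);
	if (IS_ERR(agent))
		return PTR_ERR(agent);

	msg = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_MAD_HDR,
				 IB_MGMT_MAD_DATA, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto out_unreg;
	}

	msg->ah = ah;
	msg->timeout_ms = 100;	/* drives wait_for_response()/timeout_sends() */
	msg->retries = 3;	/* consumed by retry_send() */

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_free_send_mad(msg);
		goto out_unreg;
	}

	/*
	 * A pending request can later be canceled with ib_modify_mad(msg, 0);
	 * the agent itself would be torn down with ib_unregister_mad_agent().
	 */
	return 0;

out_unreg:
	ib_unregister_mad_agent(agent);
	return ret;
}
#endif
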
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

struct kvm_vfio_file {
	struct list_head node;
	struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct iommu_group *iommu_group;
#endif
};

struct kvm_vfio {
	struct list_head file_list;
	struct mutex lock;
	bool noncoherent;
};

static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	void (*fn)(struct file *file, struct kvm *kvm);

	fn = symbol_get(vfio_file_set_kvm);
	if (!fn)
		return;

	fn(file, kvm);

	symbol_put(vfio_file_set_kvm);
}

static bool kvm_vfio_file_enforced_coherent(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_enforced_coherent);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_enforced_coherent);

	return ret;
}

static bool kvm_vfio_file_is_valid(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(vfio_file_is_valid);
	if (!fn)
		return false;

	ret = fn(file);

	symbol_put(vfio_file_is_valid);

	return ret;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
	struct iommu_group *(*fn)(struct file *file);
	struct iommu_group *ret;

	fn = symbol_get(vfio_file_iommu_group);
	if (!fn)
		return NULL;

	ret = fn(file);

	symbol_put(vfio_file_iommu_group);

	return ret;
}

static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct kvm_vfio_file *kvf)
{
	if (WARN_ON_ONCE(!kvf->iommu_group))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
	iommu_group_put(kvf->iommu_group);
	kvf->iommu_group = NULL;
}
#endif

/*
 * Groups/devices can use the same or different IOMMU domains.
If the same * then adding a new group/device may change the coherency of groups/devices * we've previously been told about. We don't want to care about any of * that so we retest each group/device and bail as soon as we find one that's * noncoherent. This means we only ever [un]register_noncoherent_dma once * for the whole device. */ static void kvm_vfio_update_coherency(struct kvm_device *dev) { struct kvm_vfio *kv = dev->private; bool noncoherent = false; struct kvm_vfio_file *kvf; list_for_each_entry(kvf, &kv->file_list, node) { if (!kvm_vfio_file_enforced_coherent(kvf->file)) { noncoherent = true; break; } } if (noncoherent != kv->noncoherent) { kv->noncoherent = noncoherent; if (kv->noncoherent) kvm_arch_register_noncoherent_dma(dev->kvm); else kvm_arch_unregister_noncoherent_dma(dev->kvm); } } static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd) { struct kvm_vfio *kv = dev->private; struct kvm_vfio_file *kvf; struct file *filp; int ret = 0; filp = fget(fd); if (!filp) return -EBADF; /* Ensure the FD is a vfio FD. */ if (!kvm_vfio_file_is_valid(filp)) { ret = -EINVAL; goto out_fput; } mutex_lock(&kv->lock); list_for_each_entry(kvf, &kv->file_list, node) { if (kvf->file == filp) { ret = -EEXIST; goto out_unlock; } } kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT); if (!kvf) { ret = -ENOMEM; goto out_unlock; } kvf->file = get_file(filp); list_add_tail(&kvf->node, &kv->file_list); kvm_vfio_file_set_kvm(kvf->file, dev->kvm); kvm_vfio_update_coherency(dev); out_unlock: mutex_unlock(&kv->lock); out_fput: fput(filp); return ret; } static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd) { struct kvm_vfio *kv = dev->private; struct kvm_vfio_file *kvf; CLASS(fd, f)(fd); int ret; if (fd_empty(f)) return -EBADF; ret = -ENOENT; mutex_lock(&kv->lock); list_for_each_entry(kvf, &kv->file_list, node) { if (kvf->file != fd_file(f)) continue; list_del(&kvf->node); #ifdef CONFIG_SPAPR_TCE_IOMMU kvm_spapr_tce_release_vfio_group(dev->kvm, kvf); #endif kvm_vfio_file_set_kvm(kvf->file, NULL); fput(kvf->file); kfree(kvf); ret = 0; break; } kvm_vfio_update_coherency(dev); mutex_unlock(&kv->lock); return ret; } #ifdef CONFIG_SPAPR_TCE_IOMMU static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev, void __user *arg) { struct kvm_vfio_spapr_tce param; struct kvm_vfio *kv = dev->private; struct kvm_vfio_file *kvf; int ret; if (copy_from_user(¶m, arg, sizeof(struct kvm_vfio_spapr_tce))) return -EFAULT; CLASS(fd, f)(param.groupfd); if (fd_empty(f)) return -EBADF; ret = -ENOENT; mutex_lock(&kv->lock); list_for_each_entry(kvf, &kv->file_list, node) { if (kvf->file != fd_file(f)) continue; if (!kvf->iommu_group) { kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file); if (WARN_ON_ONCE(!kvf->iommu_group)) { ret = -EIO; goto err_fdput; } } ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd, kvf->iommu_group); break; } err_fdput: mutex_unlock(&kv->lock); return ret; } #endif static int kvm_vfio_set_file(struct kvm_device *dev, long attr, void __user *arg) { int32_t __user *argp = arg; int32_t fd; switch (attr) { case KVM_DEV_VFIO_FILE_ADD: if (get_user(fd, argp)) return -EFAULT; return kvm_vfio_file_add(dev, fd); case KVM_DEV_VFIO_FILE_DEL: if (get_user(fd, argp)) return -EFAULT; return kvm_vfio_file_del(dev, fd); #ifdef CONFIG_SPAPR_TCE_IOMMU case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: return kvm_vfio_file_set_spapr_tce(dev, arg); #endif } return -ENXIO; } static int kvm_vfio_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { switch (attr->group) { case 
KVM_DEV_VFIO_FILE: return kvm_vfio_set_file(dev, attr->attr, u64_to_user_ptr(attr->addr)); } return -ENXIO; } static int kvm_vfio_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { switch (attr->group) { case KVM_DEV_VFIO_FILE: switch (attr->attr) { case KVM_DEV_VFIO_FILE_ADD: case KVM_DEV_VFIO_FILE_DEL: #ifdef CONFIG_SPAPR_TCE_IOMMU case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: #endif return 0; } break; } return -ENXIO; } static void kvm_vfio_release(struct kvm_device *dev) { struct kvm_vfio *kv = dev->private; struct kvm_vfio_file *kvf, *tmp; list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) { #ifdef CONFIG_SPAPR_TCE_IOMMU kvm_spapr_tce_release_vfio_group(dev->kvm, kvf); #endif kvm_vfio_file_set_kvm(kvf->file, NULL); fput(kvf->file); list_del(&kvf->node); kfree(kvf); } kvm_vfio_update_coherency(dev); kfree(kv); kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */ } static int kvm_vfio_create(struct kvm_device *dev, u32 type); static const struct kvm_device_ops kvm_vfio_ops = { .name = "kvm-vfio", .create = kvm_vfio_create, .release = kvm_vfio_release, .set_attr = kvm_vfio_set_attr, .has_attr = kvm_vfio_has_attr, }; static int kvm_vfio_create(struct kvm_device *dev, u32 type) { struct kvm_device *tmp; struct kvm_vfio *kv; lockdep_assert_held(&dev->kvm->lock); /* Only one VFIO "device" per VM */ list_for_each_entry(tmp, &dev->kvm->devices, vm_node) if (tmp->ops == &kvm_vfio_ops) return -EBUSY; kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT); if (!kv) return -ENOMEM; INIT_LIST_HEAD(&kv->file_list); mutex_init(&kv->lock); dev->private = kv; return 0; } int kvm_vfio_ops_init(void) { return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO); } void kvm_vfio_ops_exit(void) { kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO); } |
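/*
 * Illustrative user-space sketch of how a VMM reaches kvm_vfio_set_file()
 * above: create the kvm-vfio pseudo device on an existing VM fd, then pass a
 * VFIO device/group fd via KVM_DEV_VFIO_FILE_ADD. This assumes a uapi new
 * enough to define KVM_DEV_VFIO_FILE*; vm_fd and vfio_fd are assumed to be
 * already-open descriptors and error handling is trimmed to the minimum.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int kvm_vfio_attach(int vm_fd, int32_t vfio_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
	struct kvm_device_attr attr;

	/* Only one kvm-vfio device per VM; a second create returns -EBUSY. */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
		return -1;

	attr.flags = 0;
	attr.group = KVM_DEV_VFIO_FILE;		/* routed to kvm_vfio_set_attr() */
	attr.attr  = KVM_DEV_VFIO_FILE_ADD;	/* dispatched to kvm_vfio_file_add() */
	attr.addr  = (uintptr_t)&vfio_fd;	/* kernel get_user()s the fd from here */

	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}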
| 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 | /* Kernel module to match connection tracking byte counter. * GPL (C) 2002 Martin Devera (devik@cdi.cz). */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include <linux/math64.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_connbytes.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_acct.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("Xtables: Number of packets/bytes per connection matching"); MODULE_ALIAS("ipt_connbytes"); MODULE_ALIAS("ip6t_connbytes"); static bool connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_connbytes_info *sinfo = par->matchinfo; const struct nf_conn *ct; enum ip_conntrack_info ctinfo; u_int64_t what = 0; /* initialize to make gcc happy */ u_int64_t bytes = 0; u_int64_t pkts = 0; const struct nf_conn_acct *acct; const struct nf_conn_counter *counters; ct = nf_ct_get(skb, &ctinfo); if (!ct) return false; acct = nf_conn_acct_find(ct); if (!acct) return false; counters = acct->counter; switch (sinfo->what) { case XT_CONNBYTES_PKTS: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); break; case XT_CONNBYTES_DIR_REPLY: what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets); break; case XT_CONNBYTES_DIR_BOTH: what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets); break; } break; case XT_CONNBYTES_BYTES: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); break; case XT_CONNBYTES_DIR_REPLY: what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); break; case XT_CONNBYTES_DIR_BOTH: what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); break; } break; case XT_CONNBYTES_AVGPKT: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes); pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets); break; case XT_CONNBYTES_DIR_REPLY: bytes = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); pkts = atomic64_read(&counters[IP_CT_DIR_REPLY].packets); break; case XT_CONNBYTES_DIR_BOTH: bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes) + atomic64_read(&counters[IP_CT_DIR_REPLY].bytes); pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets) + atomic64_read(&counters[IP_CT_DIR_REPLY].packets); break; } if (pkts != 0) what = div64_u64(bytes, pkts); break; } if (sinfo->count.to >= sinfo->count.from) return what <= sinfo->count.to && what >= sinfo->count.from; else /* inverted */ return what < sinfo->count.to || what > sinfo->count.from; } static int connbytes_mt_check(const struct xt_mtchk_param *par) { const struct xt_connbytes_info *sinfo = par->matchinfo; int ret; if (sinfo->what != XT_CONNBYTES_PKTS && 
sinfo->what != XT_CONNBYTES_BYTES && sinfo->what != XT_CONNBYTES_AVGPKT) return -EINVAL; if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL && sinfo->direction != XT_CONNBYTES_DIR_REPLY && sinfo->direction != XT_CONNBYTES_DIR_BOTH) return -EINVAL; ret = nf_ct_netns_get(par->net, par->family); if (ret < 0) { pr_info_ratelimited("cannot load conntrack support for proto=%u\n", par->family); return ret; } /* * This filter cannot function correctly unless connection tracking * accounting is enabled, so complain in the hope that someone notices. */ if (!nf_ct_acct_enabled(par->net)) { pr_warn("Forcing CT accounting to be enabled\n"); nf_ct_set_acct(par->net, true); } return ret; } static void connbytes_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_netns_put(par->net, par->family); } static struct xt_match connbytes_mt_reg __read_mostly = { .name = "connbytes", .revision = 0, .family = NFPROTO_UNSPEC, .checkentry = connbytes_mt_check, .match = connbytes_mt, .destroy = connbytes_mt_destroy, .matchsize = sizeof(struct xt_connbytes_info), .me = THIS_MODULE, }; static int __init connbytes_mt_init(void) { return xt_register_match(&connbytes_mt_reg); } static void __exit connbytes_mt_exit(void) { xt_unregister_match(&connbytes_mt_reg); } module_init(connbytes_mt_init); module_exit(connbytes_mt_exit); |
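/*
 * Illustrative sketch of the final range test connbytes_mt() performs on the
 * accumulated counter: when to >= from the rule matches inside [from, to];
 * otherwise the range is treated as inverted and the rule matches outside
 * (to, from). Plain user-space C with hypothetical values.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool connbytes_in_range(uint64_t what, uint64_t from, uint64_t to)
{
	if (to >= from)
		return what <= to && what >= from;
	return what < to || what > from;	/* inverted range */
}

int main(void)
{
	printf("%d\n", connbytes_in_range(500, 100, 1000));	/* 1: inside [100, 1000] */
	printf("%d\n", connbytes_in_range(500, 1000, 100));	/* 0: inverted range excludes 500 */
	return 0;
}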
// SPDX-License-Identifier: GPL-2.0-or-later #include <net/gro.h> #include <net/dst_metadata.h> #include <net/busy_poll.h> #include <trace/events/net.h>
#include <linux/skbuff_ref.h> #define MAX_GRO_SKBS 8 static DEFINE_SPINLOCK(offload_lock); /** * dev_add_offload - register offload handlers * @po: protocol offload declaration * * Add protocol offload handlers to the networking stack. The passed * &proto_offload is linked into kernel lists and may not be freed until * it has been removed from the kernel lists. * * This call does not sleep therefore it can not * guarantee all CPU's that are in middle of receiving packets * will see the new offload handlers (until the next received packet). */ void dev_add_offload(struct packet_offload *po) { struct packet_offload *elem; spin_lock(&offload_lock); list_for_each_entry(elem, &net_hotdata.offload_base, list) { if (po->priority < elem->priority) break; } list_add_rcu(&po->list, elem->list.prev); spin_unlock(&offload_lock); } EXPORT_SYMBOL(dev_add_offload); /** * __dev_remove_offload - remove offload handler * @po: packet offload declaration * * Remove a protocol offload handler that was previously added to the * kernel offload handlers by dev_add_offload(). The passed &offload_type * is removed from the kernel lists and can be freed or reused once this * function returns. * * The packet type might still be in use by receivers * and must not be freed until after all the CPU's have gone * through a quiescent state. */ static void __dev_remove_offload(struct packet_offload *po) { struct list_head *head = &net_hotdata.offload_base; struct packet_offload *po1; spin_lock(&offload_lock); list_for_each_entry(po1, head, list) { if (po == po1) { list_del_rcu(&po->list); goto out; } } pr_warn("dev_remove_offload: %p not found\n", po); out: spin_unlock(&offload_lock); } /** * dev_remove_offload - remove packet offload handler * @po: packet offload declaration * * Remove a packet offload handler that was previously added to the kernel * offload handlers by dev_add_offload(). The passed &offload_type is * removed from the kernel lists and can be freed or reused once this * function returns. * * This call sleeps to guarantee that no CPU is looking at the packet * type after return. */ void dev_remove_offload(struct packet_offload *po) { __dev_remove_offload(po); synchronize_net(); } EXPORT_SYMBOL(dev_remove_offload); int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) { struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); unsigned int offset = skb_gro_offset(skb); unsigned int headlen = skb_headlen(skb); unsigned int len = skb_gro_len(skb); unsigned int delta_truesize; unsigned int new_truesize; struct sk_buff *lp; int segs; /* Do not splice page pool based packets w/ non-page pool * packets. This can result in reference count issues as page * pool pages will not decrement the reference count and will * instead be immediately returned to the pool or have frag * count decremented. 
*/ if (p->pp_recycle != skb->pp_recycle) return -ETOOMANYREFS; if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) || NAPI_GRO_CB(skb)->flush)) return -E2BIG; if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) { if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP || (p->protocol == htons(ETH_P_IPV6) && skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) || p->encapsulation) return -E2BIG; } segs = NAPI_GRO_CB(skb)->count; lp = NAPI_GRO_CB(p)->last; pinfo = skb_shinfo(lp); if (headlen <= offset) { skb_frag_t *frag; skb_frag_t *frag2; int i = skbinfo->nr_frags; int nr_frags = pinfo->nr_frags + i; if (nr_frags > MAX_SKB_FRAGS) goto merge; offset -= headlen; pinfo->nr_frags = nr_frags; skbinfo->nr_frags = 0; frag = pinfo->frags + nr_frags; frag2 = skbinfo->frags + i; do { *--frag = *--frag2; } while (--i); skb_frag_off_add(frag, offset); skb_frag_size_sub(frag, offset); /* all fragments truesize : remove (head size + sk_buff) */ new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); delta_truesize = skb->truesize - new_truesize; skb->truesize = new_truesize; skb->len -= skb->data_len; skb->data_len = 0; NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; goto done; } else if (skb->head_frag) { int nr_frags = pinfo->nr_frags; skb_frag_t *frag = pinfo->frags + nr_frags; struct page *page = virt_to_head_page(skb->head); unsigned int first_size = headlen - offset; unsigned int first_offset; if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) goto merge; first_offset = skb->data - (unsigned char *)page_address(page) + offset; pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; skb_frag_fill_page_desc(frag, page, first_offset, first_size); memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); /* We dont need to clear skbinfo->nr_frags here */ new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff)); delta_truesize = skb->truesize - new_truesize; skb->truesize = new_truesize; NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; goto done; } merge: /* sk ownership - if any - completely transferred to the aggregated packet */ skb->destructor = NULL; skb->sk = NULL; delta_truesize = skb->truesize; if (offset > headlen) { unsigned int eat = offset - headlen; skb_frag_off_add(&skbinfo->frags[0], eat); skb_frag_size_sub(&skbinfo->frags[0], eat); skb->data_len -= eat; skb->len -= eat; offset = headlen; } __skb_pull(skb, offset); if (NAPI_GRO_CB(p)->last == p) skb_shinfo(p)->frag_list = skb; else NAPI_GRO_CB(p)->last->next = skb; NAPI_GRO_CB(p)->last = skb; __skb_header_release(skb); lp = p; done: NAPI_GRO_CB(p)->count += segs; p->data_len += len; p->truesize += delta_truesize; p->len += len; if (lp != p) { lp->data_len += len; lp->truesize += delta_truesize; lp->len += len; } NAPI_GRO_CB(skb)->same_flow = 1; return 0; } int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) { if (unlikely(p->len + skb->len >= 65536)) return -E2BIG; if (NAPI_GRO_CB(p)->last == p) skb_shinfo(p)->frag_list = skb; else NAPI_GRO_CB(p)->last->next = skb; skb_pull(skb, skb_gro_offset(skb)); NAPI_GRO_CB(p)->last = skb; NAPI_GRO_CB(p)->count++; p->data_len += skb->len; /* sk ownership - if any - completely transferred to the aggregated packet */ skb->destructor = NULL; skb->sk = NULL; p->truesize += skb->truesize; p->len += skb->len; NAPI_GRO_CB(skb)->same_flow = 1; return 0; } static void gro_complete(struct gro_node *gro, struct sk_buff *skb) { struct list_head *head = &net_hotdata.offload_base; struct packet_offload *ptype; __be16 type = skb->protocol; int err = -ENOENT; BUILD_BUG_ON(sizeof(struct napi_gro_cb) > 
sizeof(skb->cb)); if (NAPI_GRO_CB(skb)->count == 1) { skb_shinfo(skb)->gso_size = 0; goto out; } rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type != type || !ptype->callbacks.gro_complete) continue; err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, ipv6_gro_complete, inet_gro_complete, skb, 0); break; } rcu_read_unlock(); if (err) { WARN_ON(&ptype->list == head); kfree_skb(skb); return; } out: gro_normal_one(gro, skb, NAPI_GRO_CB(skb)->count); } static void __gro_flush_chain(struct gro_node *gro, u32 index, bool flush_old) { struct list_head *head = &gro->hash[index].list; struct sk_buff *skb, *p; list_for_each_entry_safe_reverse(skb, p, head, list) { if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; skb_list_del_init(skb); gro_complete(gro, skb); gro->hash[index].count--; } if (!gro->hash[index].count) __clear_bit(index, &gro->bitmask); } /* * gro->hash[].list contains packets ordered by age. * youngest packets at the head of it. * Complete skbs in reverse order to reduce latencies. */ void __gro_flush(struct gro_node *gro, bool flush_old) { unsigned long bitmask = gro->bitmask; unsigned int i, base = ~0U; while ((i = ffs(bitmask)) != 0) { bitmask >>= i; base += i; __gro_flush_chain(gro, base, flush_old); } } EXPORT_SYMBOL(__gro_flush); static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb, const struct sk_buff *p, unsigned long diffs) { #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) struct tc_skb_ext *skb_ext; struct tc_skb_ext *p_ext; skb_ext = skb_ext_find(skb, TC_SKB_EXT); p_ext = skb_ext_find(p, TC_SKB_EXT); diffs |= (!!p_ext) ^ (!!skb_ext); if (!diffs && unlikely(skb_ext)) diffs |= p_ext->chain ^ skb_ext->chain; #endif return diffs; } static void gro_list_prepare(const struct list_head *head, const struct sk_buff *skb) { unsigned int maclen = skb->dev->hard_header_len; u32 hash = skb_get_hash_raw(skb); struct sk_buff *p; list_for_each_entry(p, head, list) { unsigned long diffs; if (hash != skb_get_hash_raw(p)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; diffs |= p->vlan_all ^ skb->vlan_all; diffs |= skb_metadata_differs(p, skb); if (maclen == ETH_HLEN) diffs |= compare_ether_header(skb_mac_header(p), skb_mac_header(skb)); else if (!diffs) diffs = memcmp(skb_mac_header(p), skb_mac_header(skb), maclen); /* in most common scenarios 'slow_gro' is 0 * otherwise we are already on some slower paths * either skip all the infrequent tests altogether or * avoid trying too hard to skip each of them individually */ if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) { diffs |= p->sk != skb->sk; diffs |= skb_metadata_dst_cmp(p, skb); diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); diffs |= gro_list_prepare_tc_ext(skb, p, diffs); } NAPI_GRO_CB(p)->same_flow = !diffs; } } static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) { const struct skb_shared_info *pinfo; const skb_frag_t *frag0; unsigned int headlen; NAPI_GRO_CB(skb)->network_offset = 0; NAPI_GRO_CB(skb)->data_offset = 0; headlen = skb_headlen(skb); NAPI_GRO_CB(skb)->frag0 = skb->data; NAPI_GRO_CB(skb)->frag0_len = headlen; if (headlen) return; pinfo = skb_shinfo(skb); frag0 = &pinfo->frags[0]; if (pinfo->nr_frags && skb_frag_page(frag0) && !PageHighMem(skb_frag_page(frag0)) && (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) { NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, skb_frag_size(frag0), skb->end - skb->tail); } } static void 
gro_pull_from_frag0(struct sk_buff *skb, int grow) { struct skb_shared_info *pinfo = skb_shinfo(skb); BUG_ON(skb->end - skb->tail < grow); memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); skb->data_len -= grow; skb->tail += grow; skb_frag_off_add(&pinfo->frags[0], grow); skb_frag_size_sub(&pinfo->frags[0], grow); if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { skb_frag_unref(skb, 0); memmove(pinfo->frags, pinfo->frags + 1, --pinfo->nr_frags * sizeof(pinfo->frags[0])); } } static void gro_try_pull_from_frag0(struct sk_buff *skb) { int grow = skb_gro_offset(skb) - skb_headlen(skb); if (grow > 0) gro_pull_from_frag0(skb, grow); } static void gro_flush_oldest(struct gro_node *gro, struct list_head *head) { struct sk_buff *oldest; oldest = list_last_entry(head, struct sk_buff, list); /* We are called with head length >= MAX_GRO_SKBS, so this is * impossible. */ if (WARN_ON_ONCE(!oldest)) return; /* Do not adjust napi->gro_hash[].count, caller is adding a new * SKB to the chain. */ skb_list_del_init(oldest); gro_complete(gro, oldest); } static enum gro_result dev_gro_receive(struct gro_node *gro, struct sk_buff *skb) { u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); struct list_head *head = &net_hotdata.offload_base; struct gro_list *gro_list = &gro->hash[bucket]; struct packet_offload *ptype; __be16 type = skb->protocol; struct sk_buff *pp = NULL; enum gro_result ret; int same_flow; if (netif_elide_gro(skb->dev)) goto normal; gro_list_prepare(&gro_list->list, skb); rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { if (ptype->type == type && ptype->callbacks.gro_receive) goto found_ptype; } rcu_read_unlock(); goto normal; found_ptype: skb_set_network_header(skb, skb_gro_offset(skb)); skb_reset_mac_len(skb); BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32)); BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed), sizeof(u32))); /* Avoid slow unaligned acc */ *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0; NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb); NAPI_GRO_CB(skb)->count = 1; if (unlikely(skb_is_gso(skb))) { NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs; /* Only support TCP and non DODGY users. */ if (!skb_is_gso_tcp(skb) || (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) NAPI_GRO_CB(skb)->flush = 1; } /* Setup for GRO checksum validation */ switch (skb->ip_summed) { case CHECKSUM_COMPLETE: NAPI_GRO_CB(skb)->csum = skb->csum; NAPI_GRO_CB(skb)->csum_valid = 1; break; case CHECKSUM_UNNECESSARY: NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; break; } pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, ipv6_gro_receive, inet_gro_receive, &gro_list->list, skb); rcu_read_unlock(); if (PTR_ERR(pp) == -EINPROGRESS) { ret = GRO_CONSUMED; goto ok; } same_flow = NAPI_GRO_CB(skb)->same_flow; ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; if (pp) { skb_list_del_init(pp); gro_complete(gro, pp); gro_list->count--; } if (same_flow) goto ok; if (NAPI_GRO_CB(skb)->flush) goto normal; if (unlikely(gro_list->count >= MAX_GRO_SKBS)) gro_flush_oldest(gro, &gro_list->list); else gro_list->count++; /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */ gro_try_pull_from_frag0(skb); NAPI_GRO_CB(skb)->age = jiffies; NAPI_GRO_CB(skb)->last = skb; if (!skb_is_gso(skb)) skb_shinfo(skb)->gso_size = skb_gro_len(skb); list_add(&skb->list, &gro_list->list); ret = GRO_HELD; ok: if (gro_list->count) { if (!test_bit(bucket, &gro->bitmask)) __set_bit(bucket, &gro->bitmask); } else if (test_bit(bucket, &gro->bitmask)) { __clear_bit(bucket, &gro->bitmask); } return ret; normal: ret = GRO_NORMAL; gro_try_pull_from_frag0(skb); goto ok; } struct packet_offload *gro_find_receive_by_type(__be16 type) { struct list_head *offload_head = &net_hotdata.offload_base; struct packet_offload *ptype; list_for_each_entry_rcu(ptype, offload_head, list) { if (ptype->type != type || !ptype->callbacks.gro_receive) continue; return ptype; } return NULL; } EXPORT_SYMBOL(gro_find_receive_by_type); struct packet_offload *gro_find_complete_by_type(__be16 type) { struct list_head *offload_head = &net_hotdata.offload_base; struct packet_offload *ptype; list_for_each_entry_rcu(ptype, offload_head, list) { if (ptype->type != type || !ptype->callbacks.gro_complete) continue; return ptype; } return NULL; } EXPORT_SYMBOL(gro_find_complete_by_type); static gro_result_t gro_skb_finish(struct gro_node *gro, struct sk_buff *skb, gro_result_t ret) { switch (ret) { case GRO_NORMAL: gro_normal_one(gro, skb, 1); break; case GRO_MERGED_FREE: if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) napi_skb_free_stolen_head(skb); else if (skb->fclone != SKB_FCLONE_UNAVAILABLE) __kfree_skb(skb); else __napi_kfree_skb(skb, SKB_CONSUMED); break; case GRO_HELD: case GRO_MERGED: case GRO_CONSUMED: break; } return ret; } gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb) { gro_result_t ret; __skb_mark_napi_id(skb, gro); trace_napi_gro_receive_entry(skb); skb_gro_reset_offset(skb, 0); ret = gro_skb_finish(gro, skb, dev_gro_receive(gro, skb)); trace_napi_gro_receive_exit(ret); return ret; } EXPORT_SYMBOL(gro_receive_skb); static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) { if (unlikely(skb->pfmemalloc)) { consume_skb(skb); return; } __skb_pull(skb, skb_headlen(skb)); /* restore the reserve we had after netdev_alloc_skb_ip_align() */ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); __vlan_hwaccel_clear_tag(skb); skb->dev = napi->dev; skb->skb_iif = 0; /* eth_type_trans() assumes pkt_type is PACKET_HOST */ skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; skb->ip_summed = CHECKSUM_NONE; skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_size = 0; if (unlikely(skb->slow_gro)) { skb_orphan(skb); skb_ext_reset(skb); nf_reset_ct(skb); skb->slow_gro = 0; } napi->skb = skb; } struct sk_buff *napi_get_frags(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; if (!skb) { skb = napi_alloc_skb(napi, GRO_MAX_HEAD); if (skb) { napi->skb = skb; skb_mark_napi_id(skb, napi); } } return skb; } EXPORT_SYMBOL(napi_get_frags); static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, gro_result_t ret) { switch (ret) { case GRO_NORMAL: case GRO_HELD: __skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, skb->dev); if (ret == GRO_NORMAL) gro_normal_one(&napi->gro, skb, 1); 
break; case GRO_MERGED_FREE: if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) napi_skb_free_stolen_head(skb); else napi_reuse_skb(napi, skb); break; case GRO_MERGED: case GRO_CONSUMED: break; } return ret; } /* Upper GRO stack assumes network header starts at gro_offset=0 * Drivers could call both napi_gro_frags() and napi_gro_receive() * We copy ethernet header into skb->data to have a common layout. */ static struct sk_buff *napi_frags_skb(struct napi_struct *napi) { struct sk_buff *skb = napi->skb; const struct ethhdr *eth; unsigned int hlen = sizeof(*eth); napi->skb = NULL; skb_reset_mac_header(skb); skb_gro_reset_offset(skb, hlen); if (unlikely(!skb_gro_may_pull(skb, hlen))) { eth = skb_gro_header_slow(skb, hlen, 0); if (unlikely(!eth)) { net_warn_ratelimited("%s: dropping impossible skb from %s\n", __func__, napi->dev->name); napi_reuse_skb(napi, skb); return NULL; } } else { eth = (const struct ethhdr *)skb->data; if (NAPI_GRO_CB(skb)->frag0 != skb->data) gro_pull_from_frag0(skb, hlen); NAPI_GRO_CB(skb)->frag0 += hlen; NAPI_GRO_CB(skb)->frag0_len -= hlen; } __skb_pull(skb, hlen); /* * This works because the only protocols we care about don't require * special handling. * We'll fix it up properly in napi_frags_finish() */ skb->protocol = eth->h_proto; return skb; } gro_result_t napi_gro_frags(struct napi_struct *napi) { gro_result_t ret; struct sk_buff *skb = napi_frags_skb(napi); trace_napi_gro_frags_entry(skb); ret = napi_frags_finish(napi, skb, dev_gro_receive(&napi->gro, skb)); trace_napi_gro_frags_exit(ret); return ret; } EXPORT_SYMBOL(napi_gro_frags); /* Compute the checksum from gro_offset and return the folded value * after adding in any pseudo checksum. */ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) { __wsum wsum; __sum16 sum; wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); /* See comments in __skb_checksum_complete(). */ if (likely(!sum)) { if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !skb->csum_complete_sw) netdev_rx_csum_fault(skb->dev, skb); } NAPI_GRO_CB(skb)->csum = wsum; NAPI_GRO_CB(skb)->csum_valid = 1; return sum; } EXPORT_SYMBOL(__skb_gro_checksum_complete); void gro_init(struct gro_node *gro) { for (u32 i = 0; i < GRO_HASH_BUCKETS; i++) { INIT_LIST_HEAD(&gro->hash[i].list); gro->hash[i].count = 0; } gro->bitmask = 0; gro->cached_napi_id = 0; INIT_LIST_HEAD(&gro->rx_list); gro->rx_count = 0; } void gro_cleanup(struct gro_node *gro) { struct sk_buff *skb, *n; for (u32 i = 0; i < GRO_HASH_BUCKETS; i++) { list_for_each_entry_safe(skb, n, &gro->hash[i].list, list) kfree_skb(skb); gro->hash[i].count = 0; } gro->bitmask = 0; gro->cached_napi_id = 0; list_for_each_entry_safe(skb, n, &gro->rx_list, list) kfree_skb(skb); gro->rx_count = 0; } |
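/*
 * Illustrative sketch of the bitmask walk __gro_flush() uses above to visit
 * only the hash buckets that currently hold packets: ffs() finds the next
 * set bit, the mask is shifted past it, and base tracks the absolute bucket
 * index. User-space C with a hypothetical visit_bucket() stand-in.
 */
#include <stdio.h>
#include <strings.h>

static void visit_bucket(unsigned int idx)
{
	printf("flush bucket %u\n", idx);
}

int main(void)
{
	unsigned int bitmask = 0x92;	/* buckets 1, 4 and 7 are non-empty */
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		visit_bucket(base);	/* prints 1, 4, 7 */
	}
	return 0;
}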
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 1999-2002 Vojtech Pavlik */ #ifndef _SERIO_H #define _SERIO_H #include <linux/cleanup.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <uapi/linux/serio.h> extern const struct bus_type serio_bus; struct serio { void *port_data; char name[32]; char phys[32]; char firmware_id[128]; bool manual_bind; struct serio_device_id id; /* Protects critical sections from port's interrupt handler */ spinlock_t lock; int (*write)(struct serio *, unsigned char); int (*open)(struct serio *); void (*close)(struct serio *); int (*start)(struct serio *); void (*stop)(struct serio *); struct serio *parent; /* Entry in parent->children list */ struct list_head child_node; struct list_head children; /* Level of nesting in serio hierarchy */ unsigned int depth; /* * serio->drv is accessed from interrupt handlers; when modifying * caller should acquire serio->drv_mutex and serio->lock. */ struct serio_driver *drv; /* Protects serio->drv so attributes can pin current driver */ struct mutex drv_mutex; struct device dev; struct list_head node; /* * For use by PS/2 layer when several ports share hardware and * may get indigestion when exposed to concurrent access (i8042).
*/ struct mutex *ps2_cmd_mutex; }; #define to_serio_port(d) container_of(d, struct serio, dev) struct serio_driver { const char *description; const struct serio_device_id *id_table; bool manual_bind; void (*write_wakeup)(struct serio *); irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); int (*connect)(struct serio *, struct serio_driver *drv); int (*reconnect)(struct serio *); int (*fast_reconnect)(struct serio *); void (*disconnect)(struct serio *); void (*cleanup)(struct serio *); struct device_driver driver; }; #define to_serio_driver(d) container_of_const(d, struct serio_driver, driver) int serio_open(struct serio *serio, struct serio_driver *drv); void serio_close(struct serio *serio); void serio_rescan(struct serio *serio); void serio_reconnect(struct serio *serio); irqreturn_t serio_interrupt(struct serio *serio, unsigned char data, unsigned int flags); void __serio_register_port(struct serio *serio, struct module *owner); /* use a define to avoid include chaining to get THIS_MODULE */ #define serio_register_port(serio) \ __serio_register_port(serio, THIS_MODULE) void serio_unregister_port(struct serio *serio); void serio_unregister_child_port(struct serio *serio); int __must_check __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define serio_register_driver(drv) \ __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME) void serio_unregister_driver(struct serio_driver *drv); /** * module_serio_driver() - Helper macro for registering a serio driver * @__serio_driver: serio_driver struct * * Helper macro for serio drivers which do not do anything special in * module init/exit. This eliminates a lot of boilerplate. Each module * may only use this macro once, and calling it replaces module_init() * and module_exit(). */ #define module_serio_driver(__serio_driver) \ module_driver(__serio_driver, serio_register_driver, \ serio_unregister_driver) static inline int serio_write(struct serio *serio, unsigned char data) { if (serio->write) return serio->write(serio, data); else return -1; } static inline void serio_drv_write_wakeup(struct serio *serio) { if (serio->drv && serio->drv->write_wakeup) serio->drv->write_wakeup(serio); } /* * Use the following functions to manipulate serio's per-port * driver-specific data. */ static inline void *serio_get_drvdata(struct serio *serio) { return dev_get_drvdata(&serio->dev); } static inline void serio_set_drvdata(struct serio *serio, void *data) { dev_set_drvdata(&serio->dev, data); } /* * Use the following functions to protect critical sections in * driver code from port's interrupt handler */ static inline void serio_pause_rx(struct serio *serio) { spin_lock_irq(&serio->lock); } static inline void serio_continue_rx(struct serio *serio) { spin_unlock_irq(&serio->lock); } DEFINE_GUARD(serio_pause_rx, struct serio *, serio_pause_rx(_T), serio_continue_rx(_T)) #endif |
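/*
 * Illustrative sketch: a minimal, hypothetical serio driver built only from
 * the interface declared in the header above. The "example_serio" name, the
 * SERIO_RS232 match and the byte logging are placeholders; connect() opens
 * the port and disconnect() closes it, which is the minimum a real driver
 * must do.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/serio.h>

static irqreturn_t example_interrupt(struct serio *serio,
				     unsigned char data, unsigned int flags)
{
	dev_dbg(&serio->dev, "received byte 0x%02x (flags %#x)\n", data, flags);
	return IRQ_HANDLED;
}

static int example_connect(struct serio *serio, struct serio_driver *drv)
{
	return serio_open(serio, drv);	/* binds drv as serio->drv */
}

static void example_disconnect(struct serio *serio)
{
	serio_close(serio);
}

static const struct serio_device_id example_serio_ids[] = {
	{
		.type	= SERIO_RS232,
		.proto	= SERIO_ANY,
		.id	= SERIO_ANY,
		.extra	= SERIO_ANY,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(serio, example_serio_ids);

static struct serio_driver example_serio_drv = {
	.driver		= {
		.name	= "example_serio",
	},
	.description	= "example serio byte logger",
	.id_table	= example_serio_ids,
	.interrupt	= example_interrupt,
	.connect	= example_connect,
	.disconnect	= example_disconnect,
};

module_serio_driver(example_serio_drv);

MODULE_DESCRIPTION("Example serio driver sketch");
MODULE_LICENSE("GPL");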
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
*/ #include <linux/skbuff.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/if_bonding.h> #include <linux/pkt_sched.h> #include <net/net_namespace.h> #include <net/bonding.h> #include <net/bond_3ad.h> #include <net/netlink.h> /* General definitions */ #define AD_SHORT_TIMEOUT 1 #define AD_LONG_TIMEOUT 0 #define AD_STANDBY 0x2 #define AD_MAX_TX_IN_SECOND 3 #define AD_COLLECTOR_MAX_DELAY 0 /* Timer definitions (43.4.4 in the 802.3ad standard) */ #define AD_FAST_PERIODIC_TIME 1 #define AD_SLOW_PERIODIC_TIME 30 #define AD_SHORT_TIMEOUT_TIME (3*AD_FAST_PERIODIC_TIME) #define AD_LONG_TIMEOUT_TIME (3*AD_SLOW_PERIODIC_TIME) #define AD_CHURN_DETECTION_TIME 60 #define AD_AGGREGATE_WAIT_TIME 2 /* Port Variables definitions used by the State Machines (43.4.7 in the * 802.3ad standard) */ #define AD_PORT_BEGIN 0x1 #define AD_PORT_LACP_ENABLED 0x2 #define AD_PORT_ACTOR_CHURN 0x4 #define AD_PORT_PARTNER_CHURN 0x8 #define AD_PORT_READY 0x10 #define AD_PORT_READY_N 0x20 #define AD_PORT_MATCHED 0x40 #define AD_PORT_STANDBY 0x80 #define AD_PORT_SELECTED 0x100 #define AD_PORT_MOVED 0x200 #define AD_PORT_CHURNED (AD_PORT_ACTOR_CHURN | AD_PORT_PARTNER_CHURN) /* Port Key definitions * key is determined according to the link speed, duplex and * user key (which is yet not supported) * -------------------------------------------------------------- * Port key | User key (10 bits) | Speed (5 bits) | Duplex| * -------------------------------------------------------------- * |15 6|5 1|0 */ #define AD_DUPLEX_KEY_MASKS 0x1 #define AD_SPEED_KEY_MASKS 0x3E #define AD_USER_KEY_MASKS 0xFFC0 enum ad_link_speed_type { AD_LINK_SPEED_1MBPS = 1, AD_LINK_SPEED_10MBPS, AD_LINK_SPEED_100MBPS, AD_LINK_SPEED_1000MBPS, AD_LINK_SPEED_2500MBPS, AD_LINK_SPEED_5000MBPS, AD_LINK_SPEED_10000MBPS, AD_LINK_SPEED_14000MBPS, AD_LINK_SPEED_20000MBPS, AD_LINK_SPEED_25000MBPS, AD_LINK_SPEED_40000MBPS, AD_LINK_SPEED_50000MBPS, AD_LINK_SPEED_56000MBPS, AD_LINK_SPEED_100000MBPS, AD_LINK_SPEED_200000MBPS, AD_LINK_SPEED_400000MBPS, AD_LINK_SPEED_800000MBPS, }; /* compare MAC addresses */ #define MAC_ADDRESS_EQUAL(A, B) \ ether_addr_equal_64bits((const u8 *)A, (const u8 *)B) static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL; static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }; /* ================= main 802.3ad protocol functions ================== */ static int ad_lacpdu_send(struct port *port); static int ad_marker_send(struct port *port, struct bond_marker *marker); static void ad_mux_machine(struct port *port, bool *update_slave_arr); static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port); static void ad_tx_machine(struct port *port); static void ad_periodic_machine(struct port *port); static void ad_port_selection_logic(struct port *port, bool *update_slave_arr); static void ad_agg_selection_logic(struct aggregator *aggregator, bool *update_slave_arr); static void ad_clear_agg(struct aggregator *aggregator); static void ad_initialize_agg(struct aggregator *aggregator); static void ad_initialize_port(struct port *port, const struct bond_params *bond_params); static void ad_enable_collecting(struct port *port); static void ad_disable_distributing(struct port *port, bool *update_slave_arr); static void ad_enable_collecting_distributing(struct port *port, bool *update_slave_arr); static void 
ad_disable_collecting_distributing(struct port *port, bool *update_slave_arr); static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port); static void ad_marker_response_received(struct bond_marker *marker, struct port *port); static void ad_update_actor_keys(struct port *port, bool reset); /* ================= api to bonding and kernel code ================== */ /** * __get_bond_by_port - get the port's bonding struct * @port: the port we're looking at * * Return @port's bonding struct, or %NULL if it can't be found. */ static inline struct bonding *__get_bond_by_port(struct port *port) { if (port->slave == NULL) return NULL; return bond_get_bond_by_slave(port->slave); } /** * __get_first_agg - get the first aggregator in the bond * @port: the port we're looking at * * Return the aggregator of the first slave in @bond, or %NULL if it can't be * found. * The caller must hold RCU or RTNL lock. */ static inline struct aggregator *__get_first_agg(struct port *port) { struct bonding *bond = __get_bond_by_port(port); struct slave *first_slave; struct aggregator *agg; /* If there's no bond for this port, or bond has no slaves */ if (bond == NULL) return NULL; rcu_read_lock(); first_slave = bond_first_slave_rcu(bond); agg = first_slave ? &(SLAVE_AD_INFO(first_slave)->aggregator) : NULL; rcu_read_unlock(); return agg; } /** * __agg_has_partner - see if we have a partner * @agg: the agregator we're looking at * * Return nonzero if aggregator has a partner (denoted by a non-zero ether * address for the partner). Return 0 if not. */ static inline int __agg_has_partner(struct aggregator *agg) { return !is_zero_ether_addr(agg->partner_system.mac_addr_value); } /** * __disable_distributing_port - disable the port's slave for distributing. * Port will still be able to collect. * @port: the port we're looking at * * This will disable only distributing on the port's slave. */ static void __disable_distributing_port(struct port *port) { bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER); } /** * __enable_collecting_port - enable the port's slave for collecting, * if it's up * @port: the port we're looking at * * This will enable only collecting on the port's slave. */ static void __enable_collecting_port(struct port *port) { struct slave *slave = port->slave; if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave)) bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER); } /** * __disable_port - disable the port's slave * @port: the port we're looking at * * This will disable both collecting and distributing on the port's slave. */ static inline void __disable_port(struct port *port) { bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER); } /** * __enable_port - enable the port's slave, if it's up * @port: the port we're looking at * * This will enable both collecting and distributing on the port's slave. */ static inline void __enable_port(struct port *port) { struct slave *slave = port->slave; if ((slave->link == BOND_LINK_UP) && bond_slave_is_up(slave)) bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER); } /** * __port_move_to_attached_state - check if port should transition back to attached * state. 
* @port: the port we're looking at */ static bool __port_move_to_attached_state(struct port *port) { if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) || !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) || !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) port->sm_mux_state = AD_MUX_ATTACHED; return port->sm_mux_state == AD_MUX_ATTACHED; } /** * __port_is_collecting_distributing - check if the port's slave is in the * combined collecting/distributing state * @port: the port we're looking at */ static int __port_is_collecting_distributing(struct port *port) { return bond_is_active_slave(port->slave); } /** * __get_agg_selection_mode - get the aggregator selection mode * @port: the port we're looking at * * Get the aggregator selection mode. Can be %STABLE, %BANDWIDTH or %COUNT. */ static inline u32 __get_agg_selection_mode(struct port *port) { struct bonding *bond = __get_bond_by_port(port); if (bond == NULL) return BOND_AD_STABLE; return bond->params.ad_select; } /** * __check_agg_selection_timer - check if the selection timer has expired * @port: the port we're looking at */ static inline int __check_agg_selection_timer(struct port *port) { struct bonding *bond = __get_bond_by_port(port); if (bond == NULL) return 0; return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0; } /** * __get_link_speed - get a port's speed * @port: the port we're looking at * * Return @port's speed in 802.3ad enum format. i.e. one of: * 0, * %AD_LINK_SPEED_10MBPS, * %AD_LINK_SPEED_100MBPS, * %AD_LINK_SPEED_1000MBPS, * %AD_LINK_SPEED_2500MBPS, * %AD_LINK_SPEED_5000MBPS, * %AD_LINK_SPEED_10000MBPS * %AD_LINK_SPEED_14000MBPS, * %AD_LINK_SPEED_20000MBPS * %AD_LINK_SPEED_25000MBPS * %AD_LINK_SPEED_40000MBPS * %AD_LINK_SPEED_50000MBPS * %AD_LINK_SPEED_56000MBPS * %AD_LINK_SPEED_100000MBPS * %AD_LINK_SPEED_200000MBPS * %AD_LINK_SPEED_400000MBPS * %AD_LINK_SPEED_800000MBPS */ static u16 __get_link_speed(struct port *port) { struct slave *slave = port->slave; u16 speed; /* this if covers only a special case: when the configuration starts * with link down, it sets the speed to 0. * This is done in spite of the fact that the e100 driver reports 0 * to be compatible with MVT in the future. */ if (slave->link != BOND_LINK_UP) speed = 0; else { switch (slave->speed) { case SPEED_10: speed = AD_LINK_SPEED_10MBPS; break; case SPEED_100: speed = AD_LINK_SPEED_100MBPS; break; case SPEED_1000: speed = AD_LINK_SPEED_1000MBPS; break; case SPEED_2500: speed = AD_LINK_SPEED_2500MBPS; break; case SPEED_5000: speed = AD_LINK_SPEED_5000MBPS; break; case SPEED_10000: speed = AD_LINK_SPEED_10000MBPS; break; case SPEED_14000: speed = AD_LINK_SPEED_14000MBPS; break; case SPEED_20000: speed = AD_LINK_SPEED_20000MBPS; break; case SPEED_25000: speed = AD_LINK_SPEED_25000MBPS; break; case SPEED_40000: speed = AD_LINK_SPEED_40000MBPS; break; case SPEED_50000: speed = AD_LINK_SPEED_50000MBPS; break; case SPEED_56000: speed = AD_LINK_SPEED_56000MBPS; break; case SPEED_100000: speed = AD_LINK_SPEED_100000MBPS; break; case SPEED_200000: speed = AD_LINK_SPEED_200000MBPS; break; case SPEED_400000: speed = AD_LINK_SPEED_400000MBPS; break; case SPEED_800000: speed = AD_LINK_SPEED_800000MBPS; break; default: /* unknown speed value from ethtool. 
shouldn't happen */ if (slave->speed != SPEED_UNKNOWN) pr_err_once("%s: (slave %s): unknown ethtool speed (%d) for port %d (set it to 0)\n", slave->bond->dev->name, slave->dev->name, slave->speed, port->actor_port_number); speed = 0; break; } } slave_dbg(slave->bond->dev, slave->dev, "Port %d Received link speed %d update from adapter\n", port->actor_port_number, speed); return speed; } /** * __get_duplex - get a port's duplex * @port: the port we're looking at * * Return @port's duplex in 802.3ad bitmask format. i.e.: * 0x01 if in full duplex * 0x00 otherwise */ static u8 __get_duplex(struct port *port) { struct slave *slave = port->slave; u8 retval = 0x0; /* handling a special case: when the configuration starts with * link down, it sets the duplex to 0. */ if (slave->link == BOND_LINK_UP) { switch (slave->duplex) { case DUPLEX_FULL: retval = 0x1; slave_dbg(slave->bond->dev, slave->dev, "Port %d Received status full duplex update from adapter\n", port->actor_port_number); break; case DUPLEX_HALF: default: retval = 0x0; slave_dbg(slave->bond->dev, slave->dev, "Port %d Received status NOT full duplex update from adapter\n", port->actor_port_number); break; } } return retval; } static void __ad_actor_update_port(struct port *port) { const struct bonding *bond = bond_get_bond_by_slave(port->slave); port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority; } /* Conversions */ /** * __ad_timer_to_ticks - convert a given timer type to AD module ticks * @timer_type: which timer to operate * @par: timer parameter. see below * * If @timer_type is %current_while_timer, @par indicates long/short timer. * If @timer_type is %periodic_timer, @par is one of %FAST_PERIODIC_TIME, * %SLOW_PERIODIC_TIME. */ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par) { u16 retval = 0; /* to silence the compiler */ switch (timer_type) { case AD_CURRENT_WHILE_TIMER: /* for rx machine usage */ if (par) retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); else retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); break; case AD_ACTOR_CHURN_TIMER: /* for local churn machine */ retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec); break; case AD_PERIODIC_TIMER: /* for periodic machine */ retval = (par*ad_ticks_per_sec); /* long timeout */ break; case AD_PARTNER_CHURN_TIMER: /* for remote churn machine */ retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec); break; case AD_WAIT_WHILE_TIMER: /* for selection machine */ retval = (AD_AGGREGATE_WAIT_TIME*ad_ticks_per_sec); break; } return retval; } /* ================= ad_rx_machine helper functions ================== */ /** * __choose_matched - update a port's matched variable from a received lacpdu * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * Update the value of the matched variable, using parameter values from a * newly received lacpdu. Parameter values for the partner carried in the * received PDU are compared with the corresponding operational parameter * values for the actor. Matched is set to TRUE if all of these parameters * match and the PDU parameter partner_state.aggregation has the same value as * actor_oper_port_state.aggregation and lacp will actively maintain the link * in the aggregation. Matched is also set to TRUE if the value of * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates * an individual link and lacp will actively maintain the link. Otherwise, * matched is set to FALSE. 
LACP is considered to be actively maintaining the * link if either the PDU's actor_state.lacp_activity variable is TRUE or both * the actor's actor_oper_port_state.lacp_activity and the PDU's * partner_state.lacp_activity variables are TRUE. * * Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is * used here to implement the language from 802.3ad 43.4.9 that requires * recordPDU to "match" the LACPDU parameters to the stored values. */ static void __choose_matched(struct lacpdu *lacpdu, struct port *port) { /* check if all parameters are alike * or this is individual link(aggregation == FALSE) * then update the state machine Matched variable. */ if (((ntohs(lacpdu->partner_port) == port->actor_port_number) && (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) && MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) && (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) && (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) && ((lacpdu->partner_state & LACP_STATE_AGGREGATION) == (port->actor_oper_port_state & LACP_STATE_AGGREGATION))) || ((lacpdu->actor_state & LACP_STATE_AGGREGATION) == 0) ) { port->sm_vars |= AD_PORT_MATCHED; } else { port->sm_vars &= ~AD_PORT_MATCHED; } } /** * __record_pdu - record parameters from a received lacpdu * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * Record the parameter values for the Actor carried in a received lacpdu as * the current partner operational parameter values and sets * actor_oper_port_state.defaulted to FALSE. */ static void __record_pdu(struct lacpdu *lacpdu, struct port *port) { if (lacpdu && port) { struct port_params *partner = &port->partner_oper; __choose_matched(lacpdu, port); /* record the new parameter values for the partner * operational */ partner->port_number = ntohs(lacpdu->actor_port); partner->port_priority = ntohs(lacpdu->actor_port_priority); partner->system = lacpdu->actor_system; partner->system_priority = ntohs(lacpdu->actor_system_priority); partner->key = ntohs(lacpdu->actor_key); partner->port_state = lacpdu->actor_state; /* set actor_oper_port_state.defaulted to FALSE */ port->actor_oper_port_state &= ~LACP_STATE_DEFAULTED; /* set the partner sync. to on if the partner is sync, * and the port is matched */ if ((port->sm_vars & AD_PORT_MATCHED) && (lacpdu->actor_state & LACP_STATE_SYNCHRONIZATION)) { partner->port_state |= LACP_STATE_SYNCHRONIZATION; slave_dbg(port->slave->bond->dev, port->slave->dev, "partner sync=1\n"); } else { partner->port_state &= ~LACP_STATE_SYNCHRONIZATION; slave_dbg(port->slave->bond->dev, port->slave->dev, "partner sync=0\n"); } } } /** * __record_default - record default parameters * @port: the port we're looking at * * This function records the default parameter values for the partner carried * in the Partner Admin parameters as the current partner operational parameter * values and sets actor_oper_port_state.defaulted to TRUE. */ static void __record_default(struct port *port) { if (port) { /* record the partner admin parameters */ memcpy(&port->partner_oper, &port->partner_admin, sizeof(struct port_params)); /* set actor_oper_port_state.defaulted to true */ port->actor_oper_port_state |= LACP_STATE_DEFAULTED; } } /** * __update_selected - update a port's Selected variable from a received lacpdu * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * Update the value of the selected variable, using parameter values from a * newly received lacpdu. 
The parameter values for the Actor carried in the * received PDU are compared with the corresponding operational parameter * values for the ports partner. If one or more of the comparisons shows that * the value(s) received in the PDU differ from the current operational values, * then selected is set to FALSE and actor_oper_port_state.synchronization is * set to out_of_sync. Otherwise, selected remains unchanged. */ static void __update_selected(struct lacpdu *lacpdu, struct port *port) { if (lacpdu && port) { const struct port_params *partner = &port->partner_oper; /* check if any parameter is different then * update the state machine selected variable. */ if (ntohs(lacpdu->actor_port) != partner->port_number || ntohs(lacpdu->actor_port_priority) != partner->port_priority || !MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) || ntohs(lacpdu->actor_system_priority) != partner->system_priority || ntohs(lacpdu->actor_key) != partner->key || (lacpdu->actor_state & LACP_STATE_AGGREGATION) != (partner->port_state & LACP_STATE_AGGREGATION)) { port->sm_vars &= ~AD_PORT_SELECTED; } } } /** * __update_default_selected - update a port's Selected variable from Partner * @port: the port we're looking at * * This function updates the value of the selected variable, using the partner * administrative parameter values. The administrative values are compared with * the corresponding operational parameter values for the partner. If one or * more of the comparisons shows that the administrative value(s) differ from * the current operational values, then Selected is set to FALSE and * actor_oper_port_state.synchronization is set to OUT_OF_SYNC. Otherwise, * Selected remains unchanged. */ static void __update_default_selected(struct port *port) { if (port) { const struct port_params *admin = &port->partner_admin; const struct port_params *oper = &port->partner_oper; /* check if any parameter is different then * update the state machine selected variable. */ if (admin->port_number != oper->port_number || admin->port_priority != oper->port_priority || !MAC_ADDRESS_EQUAL(&admin->system, &oper->system) || admin->system_priority != oper->system_priority || admin->key != oper->key || (admin->port_state & LACP_STATE_AGGREGATION) != (oper->port_state & LACP_STATE_AGGREGATION)) { port->sm_vars &= ~AD_PORT_SELECTED; } } } /** * __update_ntt - update a port's ntt variable from a received lacpdu * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * Updates the value of the ntt variable, using parameter values from a newly * received lacpdu. The parameter values for the partner carried in the * received PDU are compared with the corresponding operational parameter * values for the Actor. If one or more of the comparisons shows that the * value(s) received in the PDU differ from the current operational values, * then ntt is set to TRUE. Otherwise, ntt remains unchanged. */ static void __update_ntt(struct lacpdu *lacpdu, struct port *port) { /* validate lacpdu and port */ if (lacpdu && port) { /* check if any parameter is different then * update the port->ntt. 
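		 * For example, if the partner's recorded view of our port
		 * number, key, timeout or aggregation bit no longer matches
		 * our actor operational values, ntt is set so that a fresh
		 * LACPDU advertising the correct values gets transmitted.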
*/ if ((ntohs(lacpdu->partner_port) != port->actor_port_number) || (ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) || !MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) || (ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) || (ntohs(lacpdu->partner_key) != port->actor_oper_port_key) || ((lacpdu->partner_state & LACP_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY)) || ((lacpdu->partner_state & LACP_STATE_LACP_TIMEOUT) != (port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT)) || ((lacpdu->partner_state & LACP_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) || ((lacpdu->partner_state & LACP_STATE_AGGREGATION) != (port->actor_oper_port_state & LACP_STATE_AGGREGATION)) ) { port->ntt = true; } } } /** * __agg_ports_are_ready - check if all ports in an aggregator are ready * @aggregator: the aggregator we're looking at * */ static int __agg_ports_are_ready(struct aggregator *aggregator) { struct port *port; int retval = 1; if (aggregator) { /* scan all ports in this aggregator to verfy if they are * all ready. */ for (port = aggregator->lag_ports; port; port = port->next_port_in_aggregator) { if (!(port->sm_vars & AD_PORT_READY_N)) { retval = 0; break; } } } return retval; } /** * __set_agg_ports_ready - set value of Ready bit in all ports of an aggregator * @aggregator: the aggregator we're looking at * @val: Should the ports' ready bit be set on or off * */ static void __set_agg_ports_ready(struct aggregator *aggregator, int val) { struct port *port; for (port = aggregator->lag_ports; port; port = port->next_port_in_aggregator) { if (val) port->sm_vars |= AD_PORT_READY; else port->sm_vars &= ~AD_PORT_READY; } } static int __agg_active_ports(struct aggregator *agg) { struct port *port; int active = 0; for (port = agg->lag_ports; port; port = port->next_port_in_aggregator) { if (port->is_enabled) active++; } return active; } /** * __get_agg_bandwidth - get the total bandwidth of an aggregator * @aggregator: the aggregator we're looking at * */ static u32 __get_agg_bandwidth(struct aggregator *aggregator) { int nports = __agg_active_ports(aggregator); u32 bandwidth = 0; if (nports) { switch (__get_link_speed(aggregator->lag_ports)) { case AD_LINK_SPEED_1MBPS: bandwidth = nports; break; case AD_LINK_SPEED_10MBPS: bandwidth = nports * 10; break; case AD_LINK_SPEED_100MBPS: bandwidth = nports * 100; break; case AD_LINK_SPEED_1000MBPS: bandwidth = nports * 1000; break; case AD_LINK_SPEED_2500MBPS: bandwidth = nports * 2500; break; case AD_LINK_SPEED_5000MBPS: bandwidth = nports * 5000; break; case AD_LINK_SPEED_10000MBPS: bandwidth = nports * 10000; break; case AD_LINK_SPEED_14000MBPS: bandwidth = nports * 14000; break; case AD_LINK_SPEED_20000MBPS: bandwidth = nports * 20000; break; case AD_LINK_SPEED_25000MBPS: bandwidth = nports * 25000; break; case AD_LINK_SPEED_40000MBPS: bandwidth = nports * 40000; break; case AD_LINK_SPEED_50000MBPS: bandwidth = nports * 50000; break; case AD_LINK_SPEED_56000MBPS: bandwidth = nports * 56000; break; case AD_LINK_SPEED_100000MBPS: bandwidth = nports * 100000; break; case AD_LINK_SPEED_200000MBPS: bandwidth = nports * 200000; break; case AD_LINK_SPEED_400000MBPS: bandwidth = nports * 400000; break; case AD_LINK_SPEED_800000MBPS: bandwidth = nports * 800000; break; default: bandwidth = 0; /* to silence the compiler */ } } return bandwidth; } /** * __get_active_agg - get the current active aggregator * @aggregator: the aggregator we're 
looking at * * Caller must hold RCU lock. */ static struct aggregator *__get_active_agg(struct aggregator *aggregator) { struct bonding *bond = aggregator->slave->bond; struct list_head *iter; struct slave *slave; bond_for_each_slave_rcu(bond, slave, iter) if (SLAVE_AD_INFO(slave)->aggregator.is_active) return &(SLAVE_AD_INFO(slave)->aggregator); return NULL; } /** * __update_lacpdu_from_port - update a port's lacpdu fields * @port: the port we're looking at */ static inline void __update_lacpdu_from_port(struct port *port) { struct lacpdu *lacpdu = &port->lacpdu; const struct port_params *partner = &port->partner_oper; /* update current actual Actor parameters * lacpdu->subtype initialized * lacpdu->version_number initialized * lacpdu->tlv_type_actor_info initialized * lacpdu->actor_information_length initialized */ lacpdu->actor_system_priority = htons(port->actor_system_priority); lacpdu->actor_system = port->actor_system; lacpdu->actor_key = htons(port->actor_oper_port_key); lacpdu->actor_port_priority = htons(port->actor_port_priority); lacpdu->actor_port = htons(port->actor_port_number); lacpdu->actor_state = port->actor_oper_port_state; slave_dbg(port->slave->bond->dev, port->slave->dev, "update lacpdu: actor port state %x\n", port->actor_oper_port_state); /* lacpdu->reserved_3_1 initialized * lacpdu->tlv_type_partner_info initialized * lacpdu->partner_information_length initialized */ lacpdu->partner_system_priority = htons(partner->system_priority); lacpdu->partner_system = partner->system; lacpdu->partner_key = htons(partner->key); lacpdu->partner_port_priority = htons(partner->port_priority); lacpdu->partner_port = htons(partner->port_number); lacpdu->partner_state = partner->port_state; /* lacpdu->reserved_3_2 initialized * lacpdu->tlv_type_collector_info initialized * lacpdu->collector_information_length initialized * collector_max_delay initialized * reserved_12[12] initialized * tlv_type_terminator initialized * terminator_length initialized * reserved_50[50] initialized */ } /* ================= main 802.3ad protocol code ========================= */ /** * ad_lacpdu_send - send out a lacpdu packet on a given port * @port: the port we're looking at * * Returns: 0 on success * < 0 on error */ static int ad_lacpdu_send(struct port *port) { struct slave *slave = port->slave; struct sk_buff *skb; struct lacpdu_header *lacpdu_header; int length = sizeof(struct lacpdu_header); skb = dev_alloc_skb(length); if (!skb) return -ENOMEM; atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_tx); atomic64_inc(&BOND_AD_INFO(slave->bond).stats.lacpdu_tx); skb->dev = slave->dev; skb_reset_mac_header(skb); skb->network_header = skb->mac_header + ETH_HLEN; skb->protocol = PKT_TYPE_LACPDU; skb->priority = TC_PRIO_CONTROL; lacpdu_header = skb_put(skb, length); ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr); /* Note: source address is set to be the member's PERMANENT address, * because we use it to identify loopback lacpdus in receive. 
*/ ether_addr_copy(lacpdu_header->hdr.h_source, slave->perm_hwaddr); lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU; lacpdu_header->lacpdu = port->lacpdu; dev_queue_xmit(skb); return 0; } /** * ad_marker_send - send marker information/response on a given port * @port: the port we're looking at * @marker: marker data to send * * Returns: 0 on success * < 0 on error */ static int ad_marker_send(struct port *port, struct bond_marker *marker) { struct slave *slave = port->slave; struct sk_buff *skb; struct bond_marker_header *marker_header; int length = sizeof(struct bond_marker_header); skb = dev_alloc_skb(length + 16); if (!skb) return -ENOMEM; switch (marker->tlv_type) { case AD_MARKER_INFORMATION_SUBTYPE: atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_tx); atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_tx); break; case AD_MARKER_RESPONSE_SUBTYPE: atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_resp_tx); atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_resp_tx); break; } skb_reserve(skb, 16); skb->dev = slave->dev; skb_reset_mac_header(skb); skb->network_header = skb->mac_header + ETH_HLEN; skb->protocol = PKT_TYPE_LACPDU; marker_header = skb_put(skb, length); ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr); /* Note: source address is set to be the member's PERMANENT address, * because we use it to identify loopback MARKERs in receive. */ ether_addr_copy(marker_header->hdr.h_source, slave->perm_hwaddr); marker_header->hdr.h_proto = PKT_TYPE_LACPDU; marker_header->marker = *marker; dev_queue_xmit(skb); return 0; } static void ad_cond_set_peer_notif(struct port *port) { struct bonding *bond = port->slave->bond; if (bond->params.broadcast_neighbor && rtnl_trylock()) { bond->send_peer_notif = bond->params.num_peer_notif * max(1, bond->params.peer_notif_delay); rtnl_unlock(); } } /** * ad_mux_machine - handle a port's mux state machine * @port: the port we're looking at * @update_slave_arr: Does slave array need update? 
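 *
 * The mux machine moves a port through DETACHED -> WAITING -> ATTACHED
 * and then, depending on bond->params.coupled_control, either directly to
 * COLLECTING_DISTRIBUTING or through the split COLLECTING and DISTRIBUTING
 * states, enabling or disabling the slave's rx/tx path accordingly.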
*/ static void ad_mux_machine(struct port *port, bool *update_slave_arr) { struct bonding *bond = __get_bond_by_port(port); mux_states_t last_state; /* keep current State Machine state to compare later if it was * changed */ last_state = port->sm_mux_state; if (port->sm_vars & AD_PORT_BEGIN) { port->sm_mux_state = AD_MUX_DETACHED; } else { switch (port->sm_mux_state) { case AD_MUX_DETACHED: if ((port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) /* if SELECTED or STANDBY */ port->sm_mux_state = AD_MUX_WAITING; break; case AD_MUX_WAITING: /* if SELECTED == FALSE return to DETACH state */ if (!(port->sm_vars & AD_PORT_SELECTED)) { port->sm_vars &= ~AD_PORT_READY_N; /* in order to withhold the Selection Logic to * check all ports READY_N value every callback * cycle to update ready variable, we check * READY_N and update READY here */ __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); port->sm_mux_state = AD_MUX_DETACHED; break; } /* check if the wait_while_timer expired */ if (port->sm_mux_timer_counter && !(--port->sm_mux_timer_counter)) port->sm_vars |= AD_PORT_READY_N; /* in order to withhold the selection logic to check * all ports READY_N value every callback cycle to * update ready variable, we check READY_N and update * READY here */ __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); /* if the wait_while_timer expired, and the port is * in READY state, move to ATTACHED state */ if ((port->sm_vars & AD_PORT_READY) && !port->sm_mux_timer_counter) port->sm_mux_state = AD_MUX_ATTACHED; break; case AD_MUX_ATTACHED: /* check also if agg_select_timer expired (so the * edable port will take place only after this timer) */ if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) { if (port->aggregator->is_active) { int state = AD_MUX_COLLECTING_DISTRIBUTING; if (!bond->params.coupled_control) state = AD_MUX_COLLECTING; port->sm_mux_state = state; } } else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { /* if UNSELECTED or STANDBY */ port->sm_vars &= ~AD_PORT_READY_N; /* in order to withhold the selection logic to * check all ports READY_N value every callback * cycle to update ready variable, we check * READY_N and update READY here */ __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); port->sm_mux_state = AD_MUX_DETACHED; } else if (port->aggregator->is_active) { port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; } break; case AD_MUX_COLLECTING_DISTRIBUTING: if (!__port_move_to_attached_state(port)) { /* if port state hasn't changed make * sure that a collecting distributing * port in an active aggregator is enabled */ if (port->aggregator->is_active && !__port_is_collecting_distributing(port)) { __enable_port(port); *update_slave_arr = true; } } break; case AD_MUX_COLLECTING: if (!__port_move_to_attached_state(port)) { if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) && (port->partner_oper.port_state & LACP_STATE_COLLECTING)) { port->sm_mux_state = AD_MUX_DISTRIBUTING; } else { /* If port state hasn't changed, make sure that a collecting * port is enabled for an active aggregator. 
*/ struct slave *slave = port->slave; if (port->aggregator->is_active && bond_is_slave_rx_disabled(slave)) { ad_enable_collecting(port); *update_slave_arr = true; } } } break; case AD_MUX_DISTRIBUTING: if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) || !(port->partner_oper.port_state & LACP_STATE_COLLECTING) || !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) || !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) { port->sm_mux_state = AD_MUX_COLLECTING; } else { /* if port state hasn't changed make * sure that a collecting distributing * port in an active aggregator is enabled */ if (port->aggregator && port->aggregator->is_active && !__port_is_collecting_distributing(port)) { __enable_port(port); *update_slave_arr = true; } } break; default: break; } } /* check if the state machine was changed */ if (port->sm_mux_state != last_state) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Mux Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_mux_state); switch (port->sm_mux_state) { case AD_MUX_DETACHED: port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION; ad_disable_collecting_distributing(port, update_slave_arr); port->actor_oper_port_state &= ~LACP_STATE_COLLECTING; port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING; port->ntt = true; break; case AD_MUX_WAITING: port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0); break; case AD_MUX_ATTACHED: if (port->aggregator->is_active) port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; else port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION; port->actor_oper_port_state &= ~LACP_STATE_COLLECTING; port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING; ad_disable_collecting_distributing(port, update_slave_arr); port->ntt = true; break; case AD_MUX_COLLECTING_DISTRIBUTING: port->actor_oper_port_state |= LACP_STATE_COLLECTING; port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING; port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; ad_enable_collecting_distributing(port, update_slave_arr); port->ntt = true; break; case AD_MUX_COLLECTING: port->actor_oper_port_state |= LACP_STATE_COLLECTING; port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING; port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; ad_enable_collecting(port); ad_disable_distributing(port, update_slave_arr); port->ntt = true; break; case AD_MUX_DISTRIBUTING: port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING; port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION; ad_enable_collecting_distributing(port, update_slave_arr); break; default: break; } } } /** * ad_rx_machine - handle a port's rx State Machine * @lacpdu: the lacpdu we've received * @port: the port we're looking at * * If lacpdu arrived, stop previous timer (if exists) and set the next state as * CURRENT. If timer expired set the state machine in the proper state. * In other cases, this function checks if we need to switch to other state. 
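 *
 * The rx machine states are AD_RX_INITIALIZE, AD_RX_PORT_DISABLED,
 * AD_RX_LACP_DISABLED, AD_RX_EXPIRED, AD_RX_DEFAULTED and AD_RX_CURRENT.
 * A LACPDU received in the EXPIRED, DEFAULTED or CURRENT state moves the
 * port (back) to CURRENT, while expiry of the current_while timer degrades
 * it from CURRENT to EXPIRED and then to DEFAULTED.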
*/ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) { rx_states_t last_state; /* keep current State Machine state to compare later if it was * changed */ last_state = port->sm_rx_state; if (lacpdu) { atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.lacpdu_rx); atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.lacpdu_rx); } /* check if state machine should change state */ /* first, check if port was reinitialized */ if (port->sm_vars & AD_PORT_BEGIN) { port->sm_rx_state = AD_RX_INITIALIZE; port->sm_vars |= AD_PORT_CHURNED; /* check if port is not enabled */ } else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled) port->sm_rx_state = AD_RX_PORT_DISABLED; /* check if new lacpdu arrived */ else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) { if (port->sm_rx_state != AD_RX_CURRENT) port->sm_vars |= AD_PORT_CHURNED; port->sm_rx_timer_counter = 0; port->sm_rx_state = AD_RX_CURRENT; } else { /* if timer is on, and if it is expired */ if (port->sm_rx_timer_counter && !(--port->sm_rx_timer_counter)) { switch (port->sm_rx_state) { case AD_RX_EXPIRED: port->sm_rx_state = AD_RX_DEFAULTED; break; case AD_RX_CURRENT: port->sm_rx_state = AD_RX_EXPIRED; break; default: break; } } else { /* if no lacpdu arrived and no timer is on */ switch (port->sm_rx_state) { case AD_RX_PORT_DISABLED: if (port->is_enabled && (port->sm_vars & AD_PORT_LACP_ENABLED)) port->sm_rx_state = AD_RX_EXPIRED; else if (port->is_enabled && ((port->sm_vars & AD_PORT_LACP_ENABLED) == 0)) port->sm_rx_state = AD_RX_LACP_DISABLED; break; default: break; } } } /* check if the State machine was changed or new lacpdu arrived */ if ((port->sm_rx_state != last_state) || (lacpdu)) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Rx Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_rx_state); switch (port->sm_rx_state) { case AD_RX_INITIALIZE: if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)) port->sm_vars &= ~AD_PORT_LACP_ENABLED; else port->sm_vars |= AD_PORT_LACP_ENABLED; port->sm_vars &= ~AD_PORT_SELECTED; __record_default(port); port->actor_oper_port_state &= ~LACP_STATE_EXPIRED; port->sm_rx_state = AD_RX_PORT_DISABLED; fallthrough; case AD_RX_PORT_DISABLED: port->sm_vars &= ~AD_PORT_MATCHED; break; case AD_RX_LACP_DISABLED: port->sm_vars &= ~AD_PORT_SELECTED; __record_default(port); port->partner_oper.port_state &= ~LACP_STATE_AGGREGATION; port->sm_vars |= AD_PORT_MATCHED; port->actor_oper_port_state &= ~LACP_STATE_EXPIRED; break; case AD_RX_EXPIRED: /* Reset of the Synchronization flag (Standard 43.4.12) * This reset cause to disable this port in the * COLLECTING_DISTRIBUTING state of the mux machine in * case of EXPIRED even if LINK_DOWN didn't arrive for * the port. 
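			 * While expired, the current_while timer is restarted
			 * with the short timeout (AD_SHORT_TIMEOUT_TIME, i.e.
			 * 3 * AD_FAST_PERIODIC_TIME seconds) so a live partner
			 * gets one more chance to answer quickly.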
*/ port->sm_vars &= ~AD_PORT_MATCHED; /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive * machine state diagram, the statue should be * Partner_Oper_Port_State.Synchronization = FALSE; * Partner_Oper_Port_State.LACP_Timeout = Short Timeout; * start current_while_timer(Short Timeout); * Actor_Oper_Port_State.Expired = TRUE; */ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT; port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); port->actor_oper_port_state |= LACP_STATE_EXPIRED; port->sm_vars |= AD_PORT_CHURNED; break; case AD_RX_DEFAULTED: __update_default_selected(port); __record_default(port); port->sm_vars |= AD_PORT_MATCHED; port->actor_oper_port_state &= ~LACP_STATE_EXPIRED; break; case AD_RX_CURRENT: /* detect loopback situation */ if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system), &(port->actor_system))) { slave_err(port->slave->bond->dev, port->slave->dev, "An illegal loopback occurred on slave\n" "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n"); return; } __update_selected(lacpdu, port); __update_ntt(lacpdu, port); __record_pdu(lacpdu, port); port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & LACP_STATE_LACP_TIMEOUT)); port->actor_oper_port_state &= ~LACP_STATE_EXPIRED; break; default: break; } } } /** * ad_churn_machine - handle port churn's state machine * @port: the port we're looking at * */ static void ad_churn_machine(struct port *port) { if (port->sm_vars & AD_PORT_CHURNED) { port->sm_vars &= ~AD_PORT_CHURNED; port->sm_churn_actor_state = AD_CHURN_MONITOR; port->sm_churn_partner_state = AD_CHURN_MONITOR; port->sm_churn_actor_timer_counter = __ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0); port->sm_churn_partner_timer_counter = __ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0); return; } if (port->sm_churn_actor_timer_counter && !(--port->sm_churn_actor_timer_counter) && port->sm_churn_actor_state == AD_CHURN_MONITOR) { if (port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION) { port->sm_churn_actor_state = AD_NO_CHURN; } else { port->churn_actor_count++; port->sm_churn_actor_state = AD_CHURN; } } if (port->sm_churn_partner_timer_counter && !(--port->sm_churn_partner_timer_counter) && port->sm_churn_partner_state == AD_CHURN_MONITOR) { if (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) { port->sm_churn_partner_state = AD_NO_CHURN; } else { port->churn_partner_count++; port->sm_churn_partner_state = AD_CHURN; } } } /** * ad_tx_machine - handle a port's tx state machine * @port: the port we're looking at */ static void ad_tx_machine(struct port *port) { /* check if tx timer expired, to verify that we do not send more than * 3 packets per second */ if (!port->sm_tx_timer_counter || !(--port->sm_tx_timer_counter)) { /* check if there is something to send */ if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) { __update_lacpdu_from_port(port); if (ad_lacpdu_send(port) >= 0) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Sent LACPDU on port %d\n", port->actor_port_number); /* mark ntt as false, so it will not be sent * again until demanded */ port->ntt = false; /* restart tx timer(to verify that we will not * exceed AD_MAX_TX_IN_SECOND */ port->sm_tx_timer_counter = ad_ticks_per_sec / AD_MAX_TX_IN_SECOND; } } } } /** * ad_periodic_machine - handle a port's periodic state machine * @port: the port we're looking at * * Turn ntt flag on 
 * periodically to perform periodic transmission of lacpdu's.
 */
static void ad_periodic_machine(struct port *port)
{
	periodic_states_t last_state;

	/* keep current state machine state to compare later if it was changed */
	last_state = port->sm_periodic_state;

	/* check if port was reinitialized */
	if (((port->sm_vars & AD_PORT_BEGIN) ||
	     !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
	    (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) &&
	     !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
		port->sm_periodic_state = AD_NO_PERIODIC;
	}
	/* check if state machine should change state */
	else if (port->sm_periodic_timer_counter) {
		/* check if periodic state machine expired */
		if (!(--port->sm_periodic_timer_counter)) {
			/* if expired then do tx */
			port->sm_periodic_state = AD_PERIODIC_TX;
		} else {
			/* If not expired, check if there is some new timeout
			 * parameter from the partner state
			 */
			switch (port->sm_periodic_state) {
			case AD_FAST_PERIODIC:
				if (!(port->partner_oper.port_state &
				      LACP_STATE_LACP_TIMEOUT))
					port->sm_periodic_state = AD_SLOW_PERIODIC;
				break;
			case AD_SLOW_PERIODIC:
				if ((port->partner_oper.port_state &
				     LACP_STATE_LACP_TIMEOUT)) {
					port->sm_periodic_timer_counter = 0;
					port->sm_periodic_state = AD_PERIODIC_TX;
				}
				break;
			default:
				break;
			}
		}
	} else {
		switch (port->sm_periodic_state) {
		case AD_NO_PERIODIC:
			port->sm_periodic_state = AD_FAST_PERIODIC;
			break;
		case AD_PERIODIC_TX:
			if (!(port->partner_oper.port_state &
			      LACP_STATE_LACP_TIMEOUT))
				port->sm_periodic_state = AD_SLOW_PERIODIC;
			else
				port->sm_periodic_state = AD_FAST_PERIODIC;
			break;
		default:
			break;
		}
	}

	/* check if the state machine was changed */
	if (port->sm_periodic_state != last_state) {
		slave_dbg(port->slave->bond->dev, port->slave->dev,
			  "Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
			  port->actor_port_number, last_state,
			  port->sm_periodic_state);
		switch (port->sm_periodic_state) {
		case AD_NO_PERIODIC:
			port->sm_periodic_timer_counter = 0;
			break;
		case AD_FAST_PERIODIC:
			/* decrement 1 tick we lost in the PERIODIC_TX cycle */
			port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1;
			break;
		case AD_SLOW_PERIODIC:
			/* decrement 1 tick we lost in the PERIODIC_TX cycle */
			port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1;
			break;
		case AD_PERIODIC_TX:
			port->ntt = true;
			break;
		default:
			break;
		}
	}
}

/**
 * ad_port_selection_logic - select aggregation groups
 * @port: the port we're looking at
 * @update_slave_arr: Does slave array need update?
 *
 * Select aggregation groups, and assign each port to its aggregator. The
 * selection logic is called in the initialization (after all the handshakes),
 * and after every lacpdu receive (if Selected is off).
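 *
 * A port may join an existing aggregator only when its operational key
 * matches the aggregator's actor key and the partner system, partner
 * system priority and partner key recorded on the aggregator match the
 * port's partner operational values, and the aggregator already has a
 * (non-individual) partner; otherwise the port claims a free aggregator.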
*/ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) { struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator; struct port *last_port = NULL, *curr_port; struct list_head *iter; struct bonding *bond; struct slave *slave; int found = 0; /* if the port is already Selected, do nothing */ if (port->sm_vars & AD_PORT_SELECTED) return; bond = __get_bond_by_port(port); /* if the port is connected to other aggregator, detach it */ if (port->aggregator) { /* detach the port from its former aggregator */ temp_aggregator = port->aggregator; for (curr_port = temp_aggregator->lag_ports; curr_port; last_port = curr_port, curr_port = curr_port->next_port_in_aggregator) { if (curr_port == port) { temp_aggregator->num_of_ports--; /* if it is the first port attached to the * aggregator */ if (!last_port) { temp_aggregator->lag_ports = port->next_port_in_aggregator; } else { /* not the first port attached to the * aggregator */ last_port->next_port_in_aggregator = port->next_port_in_aggregator; } /* clear the port's relations to this * aggregator */ port->aggregator = NULL; port->next_port_in_aggregator = NULL; port->actor_port_aggregator_identifier = 0; slave_dbg(bond->dev, port->slave->dev, "Port %d left LAG %d\n", port->actor_port_number, temp_aggregator->aggregator_identifier); /* if the aggregator is empty, clear its * parameters, and set it ready to be attached */ if (!temp_aggregator->lag_ports) ad_clear_agg(temp_aggregator); break; } } if (!curr_port) { /* meaning: the port was related to an aggregator * but was not on the aggregator port list */ net_warn_ratelimited("%s: (slave %s): Warning: Port %d was related to aggregator %d but was not on its port list\n", port->slave->bond->dev->name, port->slave->dev->name, port->actor_port_number, port->aggregator->aggregator_identifier); } } /* search on all aggregators for a suitable aggregator for this port */ bond_for_each_slave(bond, slave, iter) { aggregator = &(SLAVE_AD_INFO(slave)->aggregator); /* keep a free aggregator for later use(if needed) */ if (!aggregator->lag_ports) { if (!free_aggregator) free_aggregator = aggregator; continue; } /* check if current aggregator suits us */ if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && /* if all parameters match AND */ MAC_ADDRESS_EQUAL(&(aggregator->partner_system), &(port->partner_oper.system)) && (aggregator->partner_system_priority == port->partner_oper.system_priority) && (aggregator->partner_oper_aggregator_key == port->partner_oper.key) ) && ((__agg_has_partner(aggregator) && /* partner answers */ !aggregator->is_individual) /* but is not individual OR */ ) ) { /* attach to the founded aggregator */ port->aggregator = aggregator; port->actor_port_aggregator_identifier = port->aggregator->aggregator_identifier; port->next_port_in_aggregator = aggregator->lag_ports; port->aggregator->num_of_ports++; aggregator->lag_ports = port; slave_dbg(bond->dev, slave->dev, "Port %d joined LAG %d (existing LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier); /* mark this port as selected */ port->sm_vars |= AD_PORT_SELECTED; found = 1; break; } } /* the port couldn't find an aggregator - attach it to a new * aggregator */ if (!found) { if (free_aggregator) { /* assign port a new aggregator */ port->aggregator = free_aggregator; port->actor_port_aggregator_identifier = port->aggregator->aggregator_identifier; /* update the new aggregator's parameters * if port was responsed from the end-user */ if 
(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS) /* if port is full duplex */ port->aggregator->is_individual = false; else port->aggregator->is_individual = true; port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key; port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key; port->aggregator->partner_system = port->partner_oper.system; port->aggregator->partner_system_priority = port->partner_oper.system_priority; port->aggregator->partner_oper_aggregator_key = port->partner_oper.key; port->aggregator->receive_state = 1; port->aggregator->transmit_state = 1; port->aggregator->lag_ports = port; port->aggregator->num_of_ports++; /* mark this port as selected */ port->sm_vars |= AD_PORT_SELECTED; slave_dbg(bond->dev, port->slave->dev, "Port %d joined LAG %d (new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier); } else { slave_err(bond->dev, port->slave->dev, "Port %d did not find a suitable aggregator\n", port->actor_port_number); return; } } /* if all aggregator's ports are READY_N == TRUE, set ready=TRUE * in all aggregator's ports, else set ready=FALSE in all * aggregator's ports */ __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); aggregator = __get_first_agg(port); ad_agg_selection_logic(aggregator, update_slave_arr); if (!port->aggregator->is_active) port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION; } /* Decide if "agg" is a better choice for the new active aggregator that * the current best, according to the ad_select policy. */ static struct aggregator *ad_agg_selection_test(struct aggregator *best, struct aggregator *curr) { /* 0. If no best, select current. * * 1. If the current agg is not individual, and the best is * individual, select current. * * 2. If current agg is individual and the best is not, keep best. * * 3. Therefore, current and best are both individual or both not * individual, so: * * 3a. If current agg partner replied, and best agg partner did not, * select current. * * 3b. If current agg partner did not reply and best agg partner * did reply, keep best. * * 4. Therefore, current and best both have partner replies or * both do not, so perform selection policy: * * BOND_AD_COUNT: Select by count of ports. If count is equal, * select by bandwidth. * * BOND_AD_STABLE, BOND_AD_BANDWIDTH: Select by bandwidth. 
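	 *
	 * For instance, with ad_select=count a two-port 1000 Mbps aggregator
	 * is preferred over a single-port 10000 Mbps one (two active ports
	 * beat one), whereas with ad_select=bandwidth or ad_select=stable the
	 * single 10000 Mbps port wins (10000 > 2000 in __get_agg_bandwidth()
	 * terms).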
*/ if (!best) return curr; if (!curr->is_individual && best->is_individual) return curr; if (curr->is_individual && !best->is_individual) return best; if (__agg_has_partner(curr) && !__agg_has_partner(best)) return curr; if (!__agg_has_partner(curr) && __agg_has_partner(best)) return best; switch (__get_agg_selection_mode(curr->lag_ports)) { case BOND_AD_COUNT: if (__agg_active_ports(curr) > __agg_active_ports(best)) return curr; if (__agg_active_ports(curr) < __agg_active_ports(best)) return best; fallthrough; case BOND_AD_STABLE: case BOND_AD_BANDWIDTH: if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best)) return curr; break; default: net_warn_ratelimited("%s: (slave %s): Impossible agg select mode %d\n", curr->slave->bond->dev->name, curr->slave->dev->name, __get_agg_selection_mode(curr->lag_ports)); break; } return best; } static int agg_device_up(const struct aggregator *agg) { struct port *port = agg->lag_ports; if (!port) return 0; for (port = agg->lag_ports; port; port = port->next_port_in_aggregator) { if (netif_running(port->slave->dev) && netif_carrier_ok(port->slave->dev)) return 1; } return 0; } /** * ad_agg_selection_logic - select an aggregation group for a team * @agg: the aggregator we're looking at * @update_slave_arr: Does slave array need update? * * It is assumed that only one aggregator may be selected for a team. * * The logic of this function is to select the aggregator according to * the ad_select policy: * * BOND_AD_STABLE: select the aggregator with the most ports attached to * it, and to reselect the active aggregator only if the previous * aggregator has no more ports related to it. * * BOND_AD_BANDWIDTH: select the aggregator with the highest total * bandwidth, and reselect whenever a link state change takes place or the * set of slaves in the bond changes. * * BOND_AD_COUNT: select the aggregator with largest number of ports * (slaves), and reselect whenever a link state change takes place or the * set of slaves in the bond changes. * * FIXME: this function MUST be called with the first agg in the bond, or * __get_active_agg() won't work correctly. This function should be better * called with the bond itself, and retrieve the first agg from it. */ static void ad_agg_selection_logic(struct aggregator *agg, bool *update_slave_arr) { struct aggregator *best, *active, *origin; struct bonding *bond = agg->slave->bond; struct list_head *iter; struct slave *slave; struct port *port; rcu_read_lock(); origin = agg; active = __get_active_agg(agg); best = (active && agg_device_up(active)) ? active : NULL; bond_for_each_slave_rcu(bond, slave, iter) { agg = &(SLAVE_AD_INFO(slave)->aggregator); agg->is_active = 0; if (__agg_active_ports(agg) && agg_device_up(agg)) best = ad_agg_selection_test(best, agg); } if (best && __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) { /* For the STABLE policy, don't replace the old active * aggregator if it's still active (it has an answering * partner) or if both the best and active don't have an * answering partner. 
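	 * In that case best is discarded and the current active aggregator
	 * keeps is_active set, unless the active aggregator has a zero
	 * operational key while the candidate does not.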
*/ if (active && active->lag_ports && __agg_active_ports(active) && (__agg_has_partner(active) || (!__agg_has_partner(active) && !__agg_has_partner(best)))) { if (!(!active->actor_oper_aggregator_key && best->actor_oper_aggregator_key)) { best = NULL; active->is_active = 1; } } } if (best && (best == active)) { best = NULL; active->is_active = 1; } /* if there is new best aggregator, activate it */ if (best) { netdev_dbg(bond->dev, "(slave %s): best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n", best->slave ? best->slave->dev->name : "NULL", best->aggregator_identifier, best->num_of_ports, best->actor_oper_aggregator_key, best->partner_oper_aggregator_key, best->is_individual, best->is_active); netdev_dbg(bond->dev, "(slave %s): best ports %p slave %p\n", best->slave ? best->slave->dev->name : "NULL", best->lag_ports, best->slave); bond_for_each_slave_rcu(bond, slave, iter) { agg = &(SLAVE_AD_INFO(slave)->aggregator); slave_dbg(bond->dev, slave->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n", agg->aggregator_identifier, agg->num_of_ports, agg->actor_oper_aggregator_key, agg->partner_oper_aggregator_key, agg->is_individual, agg->is_active); } /* check if any partner replies */ if (best->is_individual) net_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n", bond->dev->name); best->is_active = 1; netdev_dbg(bond->dev, "(slave %s): LAG %d chosen as the active LAG\n", best->slave ? best->slave->dev->name : "NULL", best->aggregator_identifier); netdev_dbg(bond->dev, "(slave %s): Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n", best->slave ? best->slave->dev->name : "NULL", best->aggregator_identifier, best->num_of_ports, best->actor_oper_aggregator_key, best->partner_oper_aggregator_key, best->is_individual, best->is_active); /* disable the ports that were related to the former * active_aggregator */ if (active) { for (port = active->lag_ports; port; port = port->next_port_in_aggregator) { __disable_port(port); } } /* Slave array needs update. */ *update_slave_arr = true; } /* if the selected aggregator is of join individuals * (partner_system is NULL), enable their ports */ active = __get_active_agg(origin); if (active) { if (!__agg_has_partner(active)) { for (port = active->lag_ports; port; port = port->next_port_in_aggregator) { __enable_port(port); } *update_slave_arr = true; } } rcu_read_unlock(); bond_3ad_set_carrier(bond); } /** * ad_clear_agg - clear a given aggregator's parameters * @aggregator: the aggregator we're looking at */ static void ad_clear_agg(struct aggregator *aggregator) { if (aggregator) { aggregator->is_individual = false; aggregator->actor_admin_aggregator_key = 0; aggregator->actor_oper_aggregator_key = 0; eth_zero_addr(aggregator->partner_system.mac_addr_value); aggregator->partner_system_priority = 0; aggregator->partner_oper_aggregator_key = 0; aggregator->receive_state = 0; aggregator->transmit_state = 0; aggregator->lag_ports = NULL; aggregator->is_active = 0; aggregator->num_of_ports = 0; pr_debug("%s: LAG %d was cleared\n", aggregator->slave ? 
aggregator->slave->dev->name : "NULL", aggregator->aggregator_identifier); } } /** * ad_initialize_agg - initialize a given aggregator's parameters * @aggregator: the aggregator we're looking at */ static void ad_initialize_agg(struct aggregator *aggregator) { if (aggregator) { ad_clear_agg(aggregator); eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value); aggregator->aggregator_identifier = 0; aggregator->slave = NULL; } } /** * ad_initialize_port - initialize a given port's parameters * @port: the port we're looking at * @bond_params: bond parameters we will use */ static void ad_initialize_port(struct port *port, const struct bond_params *bond_params) { static const struct port_params tmpl = { .system_priority = 0xffff, .key = 1, .port_number = 1, .port_priority = 0xff, .port_state = 0, }; static const struct lacpdu lacpdu = { .subtype = 0x01, .version_number = 0x01, .tlv_type_actor_info = 0x01, .actor_information_length = 0x14, .tlv_type_partner_info = 0x02, .partner_information_length = 0x14, .tlv_type_collector_info = 0x03, .collector_information_length = 0x10, .collector_max_delay = htons(AD_COLLECTOR_MAX_DELAY), }; if (port) { port->actor_port_priority = 0xff; port->actor_port_aggregator_identifier = 0; port->ntt = false; port->actor_admin_port_state = LACP_STATE_AGGREGATION; port->actor_oper_port_state = LACP_STATE_AGGREGATION; if (bond_params->lacp_active) { port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY; port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; } if (bond_params->lacp_fast) port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT; memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); memcpy(&port->partner_oper, &tmpl, sizeof(tmpl)); port->is_enabled = true; /* private parameters */ port->sm_vars = AD_PORT_BEGIN | AD_PORT_LACP_ENABLED; port->sm_rx_state = 0; port->sm_rx_timer_counter = 0; port->sm_periodic_state = 0; port->sm_periodic_timer_counter = 0; port->sm_mux_state = 0; port->sm_mux_timer_counter = 0; port->sm_tx_state = 0; port->aggregator = NULL; port->next_port_in_aggregator = NULL; port->transaction_id = 0; port->sm_churn_actor_timer_counter = 0; port->sm_churn_actor_state = 0; port->churn_actor_count = 0; port->sm_churn_partner_timer_counter = 0; port->sm_churn_partner_state = 0; port->churn_partner_count = 0; memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu)); } } /** * ad_enable_collecting - enable a port's receive * @port: the port we're looking at * * Enable @port if it's in an active aggregator */ static void ad_enable_collecting(struct port *port) { if (port->aggregator->is_active) { struct slave *slave = port->slave; slave_dbg(slave->bond->dev, slave->dev, "Enabling collecting on port %d (LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); __enable_collecting_port(port); } } /** * ad_disable_distributing - disable a port's transmit * @port: the port we're looking at * @update_slave_arr: Does slave array need update? */ static void ad_disable_distributing(struct port *port, bool *update_slave_arr) { if (port->aggregator && __agg_has_partner(port->aggregator)) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Disabling distributing on port %d (LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); __disable_distributing_port(port); /* Slave array needs an update */ *update_slave_arr = true; } } /** * ad_enable_collecting_distributing - enable a port's transmit/receive * @port: the port we're looking at * @update_slave_arr: Does slave array need update? 
* * Enable @port if it's in an active aggregator */ static void ad_enable_collecting_distributing(struct port *port, bool *update_slave_arr) { if (port->aggregator->is_active) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Enabling port %d (LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); __enable_port(port); /* Slave array needs update */ *update_slave_arr = true; /* Should notify peers if possible */ ad_cond_set_peer_notif(port); } } /** * ad_disable_collecting_distributing - disable a port's transmit/receive * @port: the port we're looking at * @update_slave_arr: Does slave array need update? */ static void ad_disable_collecting_distributing(struct port *port, bool *update_slave_arr) { if (port->aggregator && __agg_has_partner(port->aggregator)) { slave_dbg(port->slave->bond->dev, port->slave->dev, "Disabling port %d (LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); __disable_port(port); /* Slave array needs an update */ *update_slave_arr = true; } } /** * ad_marker_info_received - handle receive of a Marker information frame * @marker_info: Marker info received * @port: the port we're looking at */ static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port) { struct bond_marker marker; atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_rx); atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_rx); /* copy the received marker data to the response marker */ memcpy(&marker, marker_info, sizeof(struct bond_marker)); /* change the marker subtype to marker response */ marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE; /* send the marker response */ if (ad_marker_send(port, &marker) >= 0) slave_dbg(port->slave->bond->dev, port->slave->dev, "Sent Marker Response on port %d\n", port->actor_port_number); } /** * ad_marker_response_received - handle receive of a marker response frame * @marker: marker PDU received * @port: the port we're looking at * * This function does nothing since we decided not to implement send and handle * response for marker PDU's, in this stage, but only to respond to marker * information. */ static void ad_marker_response_received(struct bond_marker *marker, struct port *port) { atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_resp_rx); atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_resp_rx); /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */ } /* ========= AD exported functions to the main bonding code ========= */ /* Check aggregators status in team every T seconds */ #define AD_AGGREGATOR_SELECTION_TIMER 8 /** * bond_3ad_initiate_agg_selection - initate aggregator selection * @bond: bonding struct * @timeout: timeout value to set * * Set the aggregation selection timer, to initiate an agg selection in * the very near future. Called during first initialization, and during * any down to up transitions of the bond. */ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) { atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout); } /** * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures * @bond: bonding struct to work on * * Can be called only after the mac address of the bond is set. 
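 *
 * This seeds the actor system identity: ad_actor_sys_prio becomes the
 * system priority, and either ad_actor_system (if configured) or the
 * bond's own MAC address becomes the system MAC address, after which an
 * aggregator selection is scheduled via bond_3ad_initiate_agg_selection().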
 */
void bond_3ad_initialize(struct bonding *bond)
{
	BOND_AD_INFO(bond).aggregator_identifier = 0;
	BOND_AD_INFO(bond).system.sys_priority =
		bond->params.ad_actor_sys_prio;
	if (is_zero_ether_addr(bond->params.ad_actor_system))
		BOND_AD_INFO(bond).system.sys_mac_addr =
		    *((struct mac_addr *)bond->dev->dev_addr);
	else
		BOND_AD_INFO(bond).system.sys_mac_addr =
		    *((struct mac_addr *)bond->params.ad_actor_system);

	bond_3ad_initiate_agg_selection(bond,
					AD_AGGREGATOR_SELECTION_TIMER * ad_ticks_per_sec);
}

/**
 * bond_3ad_bind_slave - initialize a slave's port
 * @slave: slave struct to work on
 */
void bond_3ad_bind_slave(struct slave *slave)
{
	struct bonding *bond = bond_get_bond_by_slave(slave);
	struct port *port;
	struct aggregator *aggregator;

	/* check that the slave has not been initialized yet. */
	if (SLAVE_AD_INFO(slave)->port.slave != slave) {

		/* port initialization */
		port = &(SLAVE_AD_INFO(slave)->port);

		ad_initialize_port(port, &bond->params);

		port->slave = slave;
		port->actor_port_number = SLAVE_AD_INFO(slave)->id;
		/* key is determined according to the link speed, duplex and
		 * user key
		 */
		port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
		ad_update_actor_keys(port, false);
		/* actor system is the bond's system */
		__ad_actor_update_port(port);
		/* tx timer (to verify that no more than MAX_TX_IN_SECOND
		 * lacpdu's are sent in one second)
		 */
		port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;

		__disable_port(port);

		/* aggregator initialization */
		aggregator = &(SLAVE_AD_INFO(slave)->aggregator);

		ad_initialize_agg(aggregator);

		aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
		aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
		aggregator->slave = slave;
		aggregator->is_active = 0;
		aggregator->num_of_ports = 0;
	}
}

/**
 * bond_3ad_unbind_slave - deinitialize a slave's port
 * @slave: slave struct to work on
 *
 * Search for the aggregator that is related to this port, remove the
 * aggregator and assign another aggregator for the other ports related to it
 * (if any), and remove the port.
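 *
 * If the removed port's aggregator was the active one, a new active
 * aggregator is chosen via ad_agg_selection_logic() so the bond keeps
 * carrying traffic on its remaining slaves.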
*/ void bond_3ad_unbind_slave(struct slave *slave) { struct port *port, *prev_port, *temp_port; struct aggregator *aggregator, *new_aggregator, *temp_aggregator; int select_new_active_agg = 0; struct bonding *bond = slave->bond; struct slave *slave_iter; struct list_head *iter; bool dummy_slave_update; /* Ignore this value as caller updates array */ /* Sync against bond_3ad_state_machine_handler() */ spin_lock_bh(&bond->mode_lock); aggregator = &(SLAVE_AD_INFO(slave)->aggregator); port = &(SLAVE_AD_INFO(slave)->port); /* if slave is null, the whole port is not initialized */ if (!port->slave) { slave_warn(bond->dev, slave->dev, "Trying to unbind an uninitialized port\n"); goto out; } slave_dbg(bond->dev, slave->dev, "Unbinding Link Aggregation Group %d\n", aggregator->aggregator_identifier); /* Tell the partner that this port is not suitable for aggregation */ port->actor_oper_port_state &= ~LACP_STATE_SYNCHRONIZATION; port->actor_oper_port_state &= ~LACP_STATE_COLLECTING; port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING; port->actor_oper_port_state &= ~LACP_STATE_AGGREGATION; __update_lacpdu_from_port(port); ad_lacpdu_send(port); /* check if this aggregator is occupied */ if (aggregator->lag_ports) { /* check if there are other ports related to this aggregator * except the port related to this slave(thats ensure us that * there is a reason to search for new aggregator, and that we * will find one */ if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) { /* find new aggregator for the related port(s) */ bond_for_each_slave(bond, slave_iter, iter) { new_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator); /* if the new aggregator is empty, or it is * connected to our port only */ if (!new_aggregator->lag_ports || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator)) break; } if (!slave_iter) new_aggregator = NULL; /* if new aggregator found, copy the aggregator's * parameters and connect the related lag_ports to the * new aggregator */ if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) { slave_dbg(bond->dev, slave->dev, "Some port(s) related to LAG %d - replacing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier); if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) { slave_info(bond->dev, slave->dev, "Removing an active aggregator\n"); select_new_active_agg = 1; } new_aggregator->is_individual = aggregator->is_individual; new_aggregator->actor_admin_aggregator_key = aggregator->actor_admin_aggregator_key; new_aggregator->actor_oper_aggregator_key = aggregator->actor_oper_aggregator_key; new_aggregator->partner_system = aggregator->partner_system; new_aggregator->partner_system_priority = aggregator->partner_system_priority; new_aggregator->partner_oper_aggregator_key = aggregator->partner_oper_aggregator_key; new_aggregator->receive_state = aggregator->receive_state; new_aggregator->transmit_state = aggregator->transmit_state; new_aggregator->lag_ports = aggregator->lag_ports; new_aggregator->is_active = aggregator->is_active; new_aggregator->num_of_ports = aggregator->num_of_ports; /* update the information that is written on * the ports about the aggregator */ for (temp_port = aggregator->lag_ports; temp_port; temp_port = temp_port->next_port_in_aggregator) { temp_port->aggregator = new_aggregator; temp_port->actor_port_aggregator_identifier = 
new_aggregator->aggregator_identifier; } ad_clear_agg(aggregator); if (select_new_active_agg) ad_agg_selection_logic(__get_first_agg(port), &dummy_slave_update); } else { slave_warn(bond->dev, slave->dev, "unbinding aggregator, and could not find a new aggregator for its ports\n"); } } else { /* in case that the only port related to this * aggregator is the one we want to remove */ select_new_active_agg = aggregator->is_active; ad_clear_agg(aggregator); if (select_new_active_agg) { slave_info(bond->dev, slave->dev, "Removing an active aggregator\n"); /* select new active aggregator */ temp_aggregator = __get_first_agg(port); if (temp_aggregator) ad_agg_selection_logic(temp_aggregator, &dummy_slave_update); } } } slave_dbg(bond->dev, slave->dev, "Unbinding port %d\n", port->actor_port_number); /* find the aggregator that this port is connected to */ bond_for_each_slave(bond, slave_iter, iter) { temp_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator); prev_port = NULL; /* search the port in the aggregator's related ports */ for (temp_port = temp_aggregator->lag_ports; temp_port; prev_port = temp_port, temp_port = temp_port->next_port_in_aggregator) { if (temp_port == port) { /* the aggregator found - detach the port from * this aggregator */ if (prev_port) prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator; else temp_aggregator->lag_ports = temp_port->next_port_in_aggregator; temp_aggregator->num_of_ports--; if (__agg_active_ports(temp_aggregator) == 0) { select_new_active_agg = temp_aggregator->is_active; if (temp_aggregator->num_of_ports == 0) ad_clear_agg(temp_aggregator); if (select_new_active_agg) { slave_info(bond->dev, slave->dev, "Removing an active aggregator\n"); /* select new active aggregator */ ad_agg_selection_logic(__get_first_agg(port), &dummy_slave_update); } } break; } } } port->slave = NULL; out: spin_unlock_bh(&bond->mode_lock); } /** * bond_3ad_update_ad_actor_settings - reflect change of actor settings to ports * @bond: bonding struct to work on * * If an ad_actor setting gets changed we need to update the individual port * settings so the bond device will use the new values when it gets upped. */ void bond_3ad_update_ad_actor_settings(struct bonding *bond) { struct list_head *iter; struct slave *slave; ASSERT_RTNL(); BOND_AD_INFO(bond).system.sys_priority = bond->params.ad_actor_sys_prio; if (is_zero_ether_addr(bond->params.ad_actor_system)) BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); else BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->params.ad_actor_system); spin_lock_bh(&bond->mode_lock); bond_for_each_slave(bond, slave, iter) { struct port *port = &(SLAVE_AD_INFO(slave))->port; __ad_actor_update_port(port); port->ntt = true; } spin_unlock_bh(&bond->mode_lock); } /** * bond_agg_timer_advance - advance agg_select_timer * @bond: bonding structure * * Return true when agg_select_timer reaches 0. */ static bool bond_agg_timer_advance(struct bonding *bond) { int val, nval; while (1) { val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer); if (!val) return false; nval = val - 1; if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer, val, nval) == val) break; } return nval == 0; } /** * bond_3ad_state_machine_handler - handle state machines timeout * @work: work context to fetch bonding struct to work on from * * The state machine handling concept in this module is to check every tick * which state machine should operate any function. 
The execution order is * round robin, so when we have an interaction between state machines, the * reply of one to each other might be delayed until next tick. * * This function also complete the initialization when the agg_select_timer * times out, and it selects an aggregator for the ports that are yet not * related to any aggregator, and selects the active aggregator for a bond. */ void bond_3ad_state_machine_handler(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, ad_work.work); struct aggregator *aggregator; struct list_head *iter; struct slave *slave; struct port *port; bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; bool update_slave_arr = false; /* Lock to protect data accessed by all (e.g., port->sm_vars) and * against running with bond_3ad_unbind_slave. ad_rx_machine may run * concurrently due to incoming LACPDU as well. */ spin_lock_bh(&bond->mode_lock); rcu_read_lock(); /* check if there are any slaves */ if (!bond_has_slaves(bond)) goto re_arm; if (bond_agg_timer_advance(bond)) { slave = bond_first_slave_rcu(bond); port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL; /* select the active aggregator for the bond */ if (port) { if (!port->slave) { net_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n", bond->dev->name); goto re_arm; } aggregator = __get_first_agg(port); ad_agg_selection_logic(aggregator, &update_slave_arr); } bond_3ad_set_carrier(bond); } /* for each port run the state machines */ bond_for_each_slave_rcu(bond, slave, iter) { port = &(SLAVE_AD_INFO(slave)->port); if (!port->slave) { net_warn_ratelimited("%s: Warning: Found an uninitialized port\n", bond->dev->name); goto re_arm; } ad_rx_machine(NULL, port); ad_periodic_machine(port); ad_port_selection_logic(port, &update_slave_arr); ad_mux_machine(port, &update_slave_arr); ad_tx_machine(port); ad_churn_machine(port); /* turn off the BEGIN bit, since we already handled it */ if (port->sm_vars & AD_PORT_BEGIN) port->sm_vars &= ~AD_PORT_BEGIN; } re_arm: bond_for_each_slave_rcu(bond, slave, iter) { if (slave->should_notify) { should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; break; } } rcu_read_unlock(); spin_unlock_bh(&bond->mode_lock); if (update_slave_arr) bond_slave_arr_work_rearm(bond, 0); if (should_notify_rtnl && rtnl_trylock()) { bond_slave_state_notify(bond); rtnl_unlock(); } queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); } /** * bond_3ad_rx_indication - handle a received frame * @lacpdu: received lacpdu * @slave: slave struct to work on * * It is assumed that frames that were sent on this NIC don't returned as new * received frames (loopback). Since only the payload is given to this * function, it check for loopback. 
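 *
 * LACPDUs are fed to the RX state machine under mode_lock, Marker
 * Information frames are answered with a Marker Response, and unknown
 * subtypes only increment the corresponding statistics counters.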
*/ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave) { struct bonding *bond = slave->bond; int ret = RX_HANDLER_ANOTHER; struct bond_marker *marker; struct port *port; atomic64_t *stat; port = &(SLAVE_AD_INFO(slave)->port); if (!port->slave) { net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n", slave->dev->name, slave->bond->dev->name); return ret; } switch (lacpdu->subtype) { case AD_TYPE_LACPDU: ret = RX_HANDLER_CONSUMED; slave_dbg(slave->bond->dev, slave->dev, "Received LACPDU on port %d\n", port->actor_port_number); /* Protect against concurrent state machines */ spin_lock(&slave->bond->mode_lock); ad_rx_machine(lacpdu, port); spin_unlock(&slave->bond->mode_lock); break; case AD_TYPE_MARKER: ret = RX_HANDLER_CONSUMED; /* No need to convert fields to Little Endian since we * don't use the marker's fields. */ marker = (struct bond_marker *)lacpdu; switch (marker->tlv_type) { case AD_MARKER_INFORMATION_SUBTYPE: slave_dbg(slave->bond->dev, slave->dev, "Received Marker Information on port %d\n", port->actor_port_number); ad_marker_info_received(marker, port); break; case AD_MARKER_RESPONSE_SUBTYPE: slave_dbg(slave->bond->dev, slave->dev, "Received Marker Response on port %d\n", port->actor_port_number); ad_marker_response_received(marker, port); break; default: slave_dbg(slave->bond->dev, slave->dev, "Received an unknown Marker subtype on port %d\n", port->actor_port_number); stat = &SLAVE_AD_INFO(slave)->stats.marker_unknown_rx; atomic64_inc(stat); stat = &BOND_AD_INFO(bond).stats.marker_unknown_rx; atomic64_inc(stat); } break; default: atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_unknown_rx); atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_unknown_rx); } return ret; } /** * ad_update_actor_keys - Update the oper / admin keys for a port based on * its current speed and duplex settings. * * @port: the port we'are looking at * @reset: Boolean to just reset the speed and the duplex part of the key * * The logic to change the oper / admin keys is: * (a) A full duplex port can participate in LACP with partner. * (b) When the speed is changed, LACP need to be reinitiated. */ static void ad_update_actor_keys(struct port *port, bool reset) { u8 duplex = 0; u16 ospeed = 0, speed = 0; u16 old_oper_key = port->actor_oper_port_key; port->actor_admin_port_key &= ~(AD_SPEED_KEY_MASKS|AD_DUPLEX_KEY_MASKS); if (!reset) { speed = __get_link_speed(port); ospeed = (old_oper_key & AD_SPEED_KEY_MASKS) >> 1; duplex = __get_duplex(port); port->actor_admin_port_key |= (speed << 1) | duplex; } port->actor_oper_port_key = port->actor_admin_port_key; if (old_oper_key != port->actor_oper_port_key) { /* Only 'duplex' port participates in LACP */ if (duplex) port->sm_vars |= AD_PORT_LACP_ENABLED; else port->sm_vars &= ~AD_PORT_LACP_ENABLED; if (!reset) { if (!speed) { slave_err(port->slave->bond->dev, port->slave->dev, "speed changed to 0 on port %d\n", port->actor_port_number); } else if (duplex && ospeed != speed) { /* Speed change restarts LACP state-machine */ port->sm_vars |= AD_PORT_BEGIN; } } } } /** * bond_3ad_adapter_speed_duplex_changed - handle a slave's speed / duplex * change indication * * @slave: slave struct to work on * * Handle reselection of aggregator (if needed) for this port. 
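 *
 * Recomputes the port's admin and oper keys from the new speed and duplex
 * under mode_lock; ad_update_actor_keys() restarts the LACP state machine
 * when the speed of a full duplex port changes.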
*/ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave) { struct port *port; port = &(SLAVE_AD_INFO(slave)->port); /* if slave is null, the whole port is not initialized */ if (!port->slave) { slave_warn(slave->bond->dev, slave->dev, "speed/duplex changed for uninitialized port\n"); return; } spin_lock_bh(&slave->bond->mode_lock); ad_update_actor_keys(port, false); spin_unlock_bh(&slave->bond->mode_lock); slave_dbg(slave->bond->dev, slave->dev, "Port %d changed speed/duplex\n", port->actor_port_number); } /** * bond_3ad_handle_link_change - handle a slave's link status change indication * @slave: slave struct to work on * @link: whether the link is now up or down * * Handle reselection of aggregator (if needed) for this port. */ void bond_3ad_handle_link_change(struct slave *slave, char link) { struct aggregator *agg; struct port *port; bool dummy; port = &(SLAVE_AD_INFO(slave)->port); /* if slave is null, the whole port is not initialized */ if (!port->slave) { slave_warn(slave->bond->dev, slave->dev, "link status changed for uninitialized port\n"); return; } spin_lock_bh(&slave->bond->mode_lock); /* on link down we are zeroing duplex and speed since * some of the adaptors(ce1000.lan) report full duplex/speed * instead of N/A(duplex) / 0(speed). * * on link up we are forcing recheck on the duplex and speed since * some of he adaptors(ce1000.lan) report. */ if (link == BOND_LINK_UP) { port->is_enabled = true; ad_update_actor_keys(port, false); } else { /* link has failed */ port->is_enabled = false; ad_update_actor_keys(port, true); } agg = __get_first_agg(port); ad_agg_selection_logic(agg, &dummy); spin_unlock_bh(&slave->bond->mode_lock); slave_dbg(slave->bond->dev, slave->dev, "Port %d changed link status to %s\n", port->actor_port_number, link == BOND_LINK_UP ? "UP" : "DOWN"); /* RTNL is held and mode_lock is released so it's safe * to update slave_array here. */ bond_update_slave_arr(slave->bond, NULL); } /** * bond_3ad_set_carrier - set link state for bonding master * @bond: bonding structure * * if we have an active aggregator, we're up, if not, we're down. * Presumes that we cannot have an active aggregator if there are * no slaves with link up. * * This behavior complies with IEEE 802.3 section 43.3.9. * * Called by bond_set_carrier(). Return zero if carrier state does not * change, nonzero if it does. */ int bond_3ad_set_carrier(struct bonding *bond) { struct aggregator *active; struct slave *first_slave; int ret = 1; rcu_read_lock(); first_slave = bond_first_slave_rcu(bond); if (!first_slave) { ret = 0; goto out; } active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator)); if (active) { /* are enough slaves available to consider link up? 
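 * The active aggregator must have at least min_links active ports,
 * otherwise the carrier is turned off.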
*/ if (__agg_active_ports(active) < bond->params.min_links) { if (netif_carrier_ok(bond->dev)) { netif_carrier_off(bond->dev); goto out; } } else if (!netif_carrier_ok(bond->dev)) { netif_carrier_on(bond->dev); goto out; } } else if (netif_carrier_ok(bond->dev)) { netif_carrier_off(bond->dev); } out: rcu_read_unlock(); return ret; } /** * __bond_3ad_get_active_agg_info - get information of the active aggregator * @bond: bonding struct to work on * @ad_info: ad_info struct to fill with the bond's info * * Returns: 0 on success * < 0 on error */ int __bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) { struct aggregator *aggregator = NULL; struct list_head *iter; struct slave *slave; struct port *port; bond_for_each_slave_rcu(bond, slave, iter) { port = &(SLAVE_AD_INFO(slave)->port); if (port->aggregator && port->aggregator->is_active) { aggregator = port->aggregator; break; } } if (!aggregator) return -1; ad_info->aggregator_id = aggregator->aggregator_identifier; ad_info->ports = __agg_active_ports(aggregator); ad_info->actor_key = aggregator->actor_oper_aggregator_key; ad_info->partner_key = aggregator->partner_oper_aggregator_key; ether_addr_copy(ad_info->partner_system, aggregator->partner_system.mac_addr_value); return 0; } int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) { int ret; rcu_read_lock(); ret = __bond_3ad_get_active_agg_info(bond, ad_info); rcu_read_unlock(); return ret; } int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave) { struct lacpdu *lacpdu, _lacpdu; if (skb->protocol != PKT_TYPE_LACPDU) return RX_HANDLER_ANOTHER; if (!MAC_ADDRESS_EQUAL(eth_hdr(skb)->h_dest, lacpdu_mcast_addr)) return RX_HANDLER_ANOTHER; lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu); if (!lacpdu) { atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_illegal_rx); atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_illegal_rx); return RX_HANDLER_ANOTHER; } return bond_3ad_rx_indication(lacpdu, slave); } /** * bond_3ad_update_lacp_rate - change the lacp rate * @bond: bonding struct * * When modify lacp_rate parameter via sysfs, * update actor_oper_port_state of each port. * * Hold bond->mode_lock, * so we can modify port->actor_oper_port_state, * no matter bond is up or down. */ void bond_3ad_update_lacp_rate(struct bonding *bond) { struct port *port = NULL; struct list_head *iter; struct slave *slave; int lacp_fast; lacp_fast = bond->params.lacp_fast; spin_lock_bh(&bond->mode_lock); bond_for_each_slave(bond, slave, iter) { port = &(SLAVE_AD_INFO(slave)->port); if (lacp_fast) port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT; else port->actor_oper_port_state &= ~LACP_STATE_LACP_TIMEOUT; } spin_unlock_bh(&bond->mode_lock); } /** * bond_3ad_update_lacp_active - change the lacp active * @bond: bonding struct * * Update actor_oper_port_state when lacp_active is modified. 
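 *
 * Walks all slave ports under mode_lock and sets or clears
 * LACP_STATE_LACP_ACTIVITY in each port's actor_oper_port_state.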
*/ void bond_3ad_update_lacp_active(struct bonding *bond) { struct port *port = NULL; struct list_head *iter; struct slave *slave; int lacp_active; lacp_active = bond->params.lacp_active; spin_lock_bh(&bond->mode_lock); bond_for_each_slave(bond, slave, iter) { port = &(SLAVE_AD_INFO(slave)->port); if (lacp_active) port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; else port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY; } spin_unlock_bh(&bond->mode_lock); } size_t bond_3ad_stats_size(void) { return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_TX */ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_UNKNOWN_RX */ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_ILLEGAL_RX */ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RX */ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_TX */ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_RX */ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_TX */ nla_total_size_64bit(sizeof(u64)); /* BOND_3AD_STAT_MARKER_UNKNOWN_RX */ } int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats) { u64 val; val = atomic64_read(&stats->lacpdu_rx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_RX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; val = atomic64_read(&stats->lacpdu_tx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_TX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; val = atomic64_read(&stats->lacpdu_unknown_rx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_UNKNOWN_RX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; val = atomic64_read(&stats->lacpdu_illegal_rx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_ILLEGAL_RX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; val = atomic64_read(&stats->marker_rx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; val = atomic64_read(&stats->marker_tx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_TX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; val = atomic64_read(&stats->marker_resp_rx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_RX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; val = atomic64_read(&stats->marker_resp_tx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_TX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; val = atomic64_read(&stats->marker_unknown_rx); if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_UNKNOWN_RX, val, BOND_3AD_STAT_PAD)) return -EMSGSIZE; return 0; } |
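/*
 * A minimal usage sketch, not taken from the bonding driver itself: how the
 * two helpers above are meant to be paired by a netlink xstats fill callback.
 * The caller would add bond_3ad_stats_size() (plus nla_total_size(0) for the
 * nest header) to its size estimate, then emit the counters inside a nested
 * attribute as below. The function name is hypothetical, and BOND_XSTATS_3AD
 * is assumed to be the nest attribute type used by that caller.
 */
static int example_fill_3ad_xstats(struct sk_buff *skb, struct bonding *bond)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, BOND_XSTATS_3AD);
	if (!nest)
		return -EMSGSIZE;

	/* One u64 netlink attribute per 802.3ad counter, as filled above. */
	if (bond_3ad_stats_fill(skb, &BOND_AD_INFO(bond).stats)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}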
| 9883 9878 220 220 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 | // SPDX-License-Identifier: GPL-2.0-only /* Common code for 32 and 64-bit NUMA */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/string.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mmzone.h> #include <linux/ctype.h> #include <linux/nodemask.h> #include <linux/sched.h> #include <linux/topology.h> #include <linux/sort.h> #include <linux/numa_memblks.h> #include <asm/e820/api.h> #include <asm/proto.h> #include <asm/dma.h> #include <asm/numa.h> #include <asm/amd/nb.h> #include "mm_internal.h" int numa_off; static __init int numa_setup(char *opt) { if (!opt) return -EINVAL; if (!strncmp(opt, "off", 3)) numa_off = 1; if (!strncmp(opt, "fake=", 5)) return numa_emu_cmdline(opt + 5); if (!strncmp(opt, "noacpi", 6)) disable_srat(); if (!strncmp(opt, "nohmat", 6)) disable_hmat(); return 0; } early_param("numa", numa_setup); /* * apicid, cpu, node mappings */ s16 __apicid_to_node[MAX_LOCAL_APIC] = { [0 ... 
MAX_LOCAL_APIC-1] = NUMA_NO_NODE }; int numa_cpu_node(int cpu) { u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu); if (apicid != BAD_APICID) return __apicid_to_node[apicid]; return NUMA_NO_NODE; } cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); /* * Map cpu index to node index */ DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); void numa_set_node(int cpu, int node) { int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); /* early setting, no percpu area yet */ if (cpu_to_node_map) { cpu_to_node_map[cpu] = node; return; } #ifdef CONFIG_DEBUG_PER_CPU_MAPS if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); dump_stack(); return; } #endif per_cpu(x86_cpu_to_node_map, cpu) = node; set_cpu_numa_node(cpu, node); } void numa_clear_node(int cpu) { numa_set_node(cpu, NUMA_NO_NODE); } /* * Allocate node_to_cpumask_map based on number of available nodes * Requires node_possible_map to be valid. * * Note: cpumask_of_node() is not valid until after this is done. * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) */ void __init setup_node_to_cpumask_map(void) { unsigned int node; /* setup nr_node_ids if not done yet */ if (nr_node_ids == MAX_NUMNODES) setup_nr_node_ids(); /* allocate the map */ for (node = 0; node < nr_node_ids; node++) alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); /* cpumask_of_node() will now work */ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } static int __init numa_register_nodes(void) { int nid; if (!memblock_validate_numa_coverage(SZ_1M)) return -EINVAL; /* Finally register nodes. */ for_each_node_mask(nid, node_possible_map) { unsigned long start_pfn, end_pfn; /* * Note, get_pfn_range_for_nid() depends on * memblock_set_node() having already happened */ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); if (start_pfn >= end_pfn) continue; alloc_node_data(nid); node_set_online(nid); } /* Dump memblock with node info and return. */ memblock_dump_all(); return 0; } /* * There are unfortunately some poorly designed mainboards around that * only connect memory to a single CPU. This breaks the 1:1 cpu->node * mapping. To avoid this fill in the mapping for all possible CPUs, * as the number of CPUs is not known yet. We round robin the existing * nodes. */ static void __init numa_init_array(void) { int rr, i; rr = first_node(node_online_map); for (i = 0; i < nr_cpu_ids; i++) { if (early_cpu_to_node(i) != NUMA_NO_NODE) continue; numa_set_node(i, rr); rr = next_node_in(rr, node_online_map); } } static int __init numa_init(int (*init_func)(void)) { int i; int ret; for (i = 0; i < MAX_LOCAL_APIC; i++) set_apicid_to_node(i, NUMA_NO_NODE); ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true); if (ret < 0) return ret; ret = numa_register_nodes(); if (ret < 0) return ret; for (i = 0; i < nr_cpu_ids; i++) { int nid = early_cpu_to_node(i); if (nid == NUMA_NO_NODE) continue; if (!node_online(nid)) numa_clear_node(i); } numa_init_array(); return 0; } /** * dummy_numa_init - Fallback dummy NUMA init * * Used if there's no underlying NUMA architecture, NUMA initialization * fails, or NUMA is disabled on the command line. * * Must online at least one node and add memory blocks that cover all * allowed memory. This function must not fail. */ static int __init dummy_numa_init(void) { printk(KERN_INFO "%s\n", numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n", 0LLU, PFN_PHYS(max_pfn) - 1); node_set(0, numa_nodes_parsed); numa_add_memblk(0, 0, PFN_PHYS(max_pfn)); return 0; } /** * x86_numa_init - Initialize NUMA * * Try each configured NUMA initialization method until one succeeds. The * last fallback is dummy single node config encompassing whole memory and * never fails. */ void __init x86_numa_init(void) { if (!numa_off) { #ifdef CONFIG_ACPI_NUMA if (!numa_init(x86_acpi_numa_init)) return; #endif #ifdef CONFIG_AMD_NUMA if (!numa_init(amd_numa_init)) return; #endif if (acpi_disabled && !numa_init(of_numa_init)) return; } numa_init(dummy_numa_init); } /* * A node may exist which has one or more Generic Initiators but no CPUs and no * memory. * * This function must be called after init_cpu_to_node(), to ensure that any * memoryless CPU nodes have already been brought online, and before the * node_data[nid] is needed for zone list setup in build_all_zonelists(). * * When this function is called, any nodes containing either memory and/or CPUs * will already be online and there is no need to do anything extra, even if * they also contain one or more Generic Initiators. */ void __init init_gi_nodes(void) { int nid; /* * Exclude this node from * bringup_nonboot_cpus * cpu_up * __try_online_node * register_one_node * because node_subsys is not initialized yet. * TODO remove dependency on node_online */ for_each_node_state(nid, N_GENERIC_INITIATOR) if (!node_online(nid)) node_set_online(nid); } /* * Setup early cpu_to_node. * * Populate cpu_to_node[] only if x86_cpu_to_apicid[], * and apicid_to_node[] tables have valid entries for a CPU. * This means we skip cpu_to_node[] initialisation for NUMA * emulation and faking node case (when running a kernel compiled * for NUMA on a non NUMA box), which is OK as cpu_to_node[] * is already initialized in a round robin manner at numa_init_array, * prior to this call, and this initialization is good enough * for the fake NUMA cases. * * Called before the per_cpu areas are setup. */ void __init init_cpu_to_node(void) { int cpu; u32 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); BUG_ON(cpu_to_apicid == NULL); for_each_possible_cpu(cpu) { int node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE) continue; /* * Exclude this node from * bringup_nonboot_cpus * cpu_up * __try_online_node * register_one_node * because node_subsys is not initialized yet. * TODO remove dependency on node_online */ if (!node_online(node)) node_set_online(node); numa_set_node(cpu, node); } } #ifndef CONFIG_DEBUG_PER_CPU_MAPS # ifndef CONFIG_NUMA_EMU void numa_add_cpu(unsigned int cpu) { cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } void numa_remove_cpu(unsigned int cpu) { cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } # endif /* !CONFIG_NUMA_EMU */ #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ int __cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) { printk(KERN_WARNING "cpu_to_node(%d): usage too early!\n", cpu); dump_stack(); return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; } return per_cpu(x86_cpu_to_node_map, cpu); } EXPORT_SYMBOL(__cpu_to_node); /* * Same function as cpu_to_node() but used if called before the * per_cpu areas are setup. 
*/ int early_cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; if (!cpu_possible(cpu)) { printk(KERN_WARNING "early_cpu_to_node(%d): no per_cpu area!\n", cpu); dump_stack(); return NUMA_NO_NODE; } return per_cpu(x86_cpu_to_node_map, cpu); } void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable) { struct cpumask *mask; if (node == NUMA_NO_NODE) { /* early_cpu_to_node() already emits a warning and trace */ return; } mask = node_to_cpumask_map[node]; if (!cpumask_available(mask)) { pr_err("node_to_cpumask_map[%i] NULL\n", node); dump_stack(); return; } if (enable) cpumask_set_cpu(cpu, mask); else cpumask_clear_cpu(cpu, mask); printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n", enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, cpumask_pr_args(mask)); return; } # ifndef CONFIG_NUMA_EMU static void numa_set_cpumask(int cpu, bool enable) { debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); } void numa_add_cpu(unsigned int cpu) { numa_set_cpumask(cpu, true); } void numa_remove_cpu(unsigned int cpu) { numa_set_cpumask(cpu, false); } # endif /* !CONFIG_NUMA_EMU */ /* * Returns a pointer to the bitmask of CPUs on Node 'node'. */ const struct cpumask *cpumask_of_node(int node) { if ((unsigned)node >= nr_node_ids) { printk(KERN_WARNING "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n", node, nr_node_ids); dump_stack(); return cpu_none_mask; } if (!cpumask_available(node_to_cpumask_map[node])) { printk(KERN_WARNING "cpumask_of_node(%d): no node_to_cpumask_map!\n", node); dump_stack(); return cpu_online_mask; } return node_to_cpumask_map[node]; } EXPORT_SYMBOL(cpumask_of_node); #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ #ifdef CONFIG_NUMA_EMU void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, unsigned int nr_emu_nids) { int i, j; /* * Transform __apicid_to_node table to use emulated nids by * reverse-mapping phys_nid. The maps should always exist but fall * back to zero just in case. */ for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { if (__apicid_to_node[i] == NUMA_NO_NODE) continue; for (j = 0; j < nr_emu_nids; j++) if (__apicid_to_node[i] == emu_nid_to_phys[j]) break; __apicid_to_node[i] = j < nr_emu_nids ? j : 0; } } u64 __init numa_emu_dma_end(void) { return PFN_PHYS(MAX_DMA32_PFN); } #endif /* CONFIG_NUMA_EMU */ |
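/*
 * A minimal usage sketch, not part of this file: a typical consumer of the
 * cpumask_of_node() accessor above. Once setup_node_to_cpumask_map() has
 * allocated the masks and numa_add_cpu() has populated them, a node's CPUs
 * can be walked like this. The helper name is hypothetical.
 */
static unsigned int example_online_cpus_on_node(int nid)
{
	unsigned int cpu, n = 0;

	/*
	 * The checked variant of cpumask_of_node() above returns
	 * cpu_none_mask for an out-of-range node and cpu_online_mask when
	 * the map is not yet allocated, so iterating the result is safe.
	 */
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		if (cpu_online(cpu))
			n++;
	}

	return n;
}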
2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 | // SPDX-License-Identifier: GPL-2.0-only /* * vivid-core.c - A Virtual Video Test Driver, core initialization * * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/font.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/videodev2.h> #include <linux/v4l2-dv-timings.h> #include <media/videobuf2-vmalloc.h> #include <media/videobuf2-dma-contig.h> #include <media/v4l2-dv-timings.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> #include "vivid-core.h" #include "vivid-vid-common.h" #include "vivid-vid-cap.h" #include "vivid-vid-out.h" #include "vivid-radio-common.h" #include "vivid-radio-rx.h" #include "vivid-radio-tx.h" #include "vivid-sdr-cap.h" #include "vivid-vbi-cap.h" #include "vivid-vbi-out.h" #include "vivid-osd.h" #include "vivid-cec.h" #include "vivid-ctrls.h" #include "vivid-meta-cap.h" #include "vivid-meta-out.h" #include "vivid-touch-cap.h" #define VIVID_MODULE_NAME "vivid" #define MAX_STRING_LENGTH 23 MODULE_DESCRIPTION("Virtual Video Test Driver"); MODULE_AUTHOR("Hans Verkuil"); MODULE_LICENSE("GPL"); unsigned int n_devs = 1; module_param(n_devs, uint, 0444); MODULE_PARM_DESC(n_devs, " number of driver instances to create"); static int vid_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(vid_cap_nr, int, NULL, 0444); MODULE_PARM_DESC(vid_cap_nr, " videoX start number, -1 is autodetect"); static int vid_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(vid_out_nr, int, NULL, 0444); MODULE_PARM_DESC(vid_out_nr, " videoX start number, -1 is autodetect"); static int vbi_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(vbi_cap_nr, int, NULL, 0444); MODULE_PARM_DESC(vbi_cap_nr, " vbiX start number, -1 is autodetect"); static int vbi_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(vbi_out_nr, int, NULL, 0444); MODULE_PARM_DESC(vbi_out_nr, " vbiX start number, -1 is autodetect"); static int sdr_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(sdr_cap_nr, int, NULL, 0444); MODULE_PARM_DESC(sdr_cap_nr, " swradioX start number, -1 is autodetect"); static int radio_rx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(radio_rx_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_rx_nr, " radioX start number, -1 is autodetect"); static int radio_tx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(radio_tx_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_tx_nr, " radioX start number, -1 is autodetect"); static int meta_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(meta_cap_nr, int, NULL, 0444); MODULE_PARM_DESC(meta_cap_nr, " videoX start number, -1 is autodetect"); static int meta_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(meta_out_nr, int, NULL, 0444); MODULE_PARM_DESC(meta_out_nr, " videoX start number, -1 is autodetect"); static int touch_cap_nr[VIVID_MAX_DEVS] = { [0 ... 
(VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(touch_cap_nr, int, NULL, 0444); MODULE_PARM_DESC(touch_cap_nr, " v4l-touchX start number, -1 is autodetect"); static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(ccs_cap_mode, int, NULL, 0444); MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n" "\t\t bit 0=crop, 1=compose, 2=scale,\n" "\t\t -1=user-controlled (default)"); static int ccs_out_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 }; module_param_array(ccs_out_mode, int, NULL, 0444); MODULE_PARM_DESC(ccs_out_mode, " output crop/compose/scale mode:\n" "\t\t bit 0=crop, 1=compose, 2=scale,\n" "\t\t -1=user-controlled (default)"); static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1 }; module_param_array(multiplanar, uint, NULL, 0444); MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device."); /* * Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + * vbi-out + vid-out + meta-cap */ static unsigned int node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0xe1d3d }; module_param_array(node_types, uint, NULL, 0444); MODULE_PARM_DESC(node_types, " node types, default is 0xe1d3d. Bitmask with the following meaning:\n" "\t\t bit 0: Video Capture node\n" "\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n" "\t\t bit 4: Radio Receiver node\n" "\t\t bit 5: Software Defined Radio Receiver node\n" "\t\t bit 8: Video Output node\n" "\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n" "\t\t bit 12: Radio Transmitter node\n" #ifdef CONFIG_VIDEO_VIVID_OSD "\t\t bit 16: Framebuffer for testing output overlays\n" #endif "\t\t bit 17: Metadata Capture node\n" "\t\t bit 18: Metadata Output node\n" "\t\t bit 19: Touch Capture node\n"); /* Default: 4 inputs */ static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 }; module_param_array(num_inputs, uint, NULL, 0444); MODULE_PARM_DESC(num_inputs, " number of inputs, default is 4"); /* Default: input 0 = WEBCAM, 1 = TV, 2 = SVID, 3 = HDMI */ static unsigned input_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0xe4 }; module_param_array(input_types, uint, NULL, 0444); MODULE_PARM_DESC(input_types, " input types, default is 0xe4. Two bits per input,\n" "\t\t bits 0-1 == input 0, bits 31-30 == input 15.\n" "\t\t Type 0 == webcam, 1 == TV, 2 == S-Video, 3 == HDMI"); /* Default: 2 outputs */ static unsigned num_outputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 2 }; module_param_array(num_outputs, uint, NULL, 0444); MODULE_PARM_DESC(num_outputs, " number of outputs, default is 2"); /* Default: output 0 = SVID, 1 = HDMI */ static unsigned output_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 2 }; module_param_array(output_types, uint, NULL, 0444); MODULE_PARM_DESC(output_types, " output types, default is 0x02. One bit per output,\n" "\t\t bit 0 == output 0, bit 15 == output 15.\n" "\t\t Type 0 == S-Video, 1 == HDMI"); unsigned vivid_debug; module_param(vivid_debug, uint, 0644); MODULE_PARM_DESC(vivid_debug, " activates debug info"); static bool no_error_inj; module_param(no_error_inj, bool, 0444); MODULE_PARM_DESC(no_error_inj, " if set disable the error injecting controls"); static unsigned int allocators[VIVID_MAX_DEVS] = { [0 ... 
(VIVID_MAX_DEVS - 1)] = 0 }; module_param_array(allocators, uint, NULL, 0444); MODULE_PARM_DESC(allocators, " memory allocator selection, default is 0.\n" "\t\t 0 == vmalloc\n" "\t\t 1 == dma-contig"); static unsigned int cache_hints[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0 }; module_param_array(cache_hints, uint, NULL, 0444); MODULE_PARM_DESC(cache_hints, " user-space cache hints, default is 0.\n" "\t\t 0 == forbid\n" "\t\t 1 == allow"); static unsigned int supports_requests[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1 }; module_param_array(supports_requests, uint, NULL, 0444); MODULE_PARM_DESC(supports_requests, " support for requests, default is 1.\n" "\t\t 0 == no support\n" "\t\t 1 == supports requests\n" "\t\t 2 == requires requests"); struct vivid_dev *vivid_devs[VIVID_MAX_DEVS]; DEFINE_SPINLOCK(hdmi_output_skip_mask_lock); struct workqueue_struct *update_hdmi_ctrls_workqueue; u64 hdmi_to_output_menu_skip_mask; u64 hdmi_input_update_outputs_mask; struct vivid_dev *vivid_ctrl_hdmi_to_output_instance[MAX_MENU_ITEMS]; unsigned int vivid_ctrl_hdmi_to_output_index[MAX_MENU_ITEMS]; char *vivid_ctrl_hdmi_to_output_strings[MAX_MENU_ITEMS + 1] = { "Test Pattern Generator", "None" }; DEFINE_SPINLOCK(svid_output_skip_mask_lock); struct workqueue_struct *update_svid_ctrls_workqueue; u64 svid_to_output_menu_skip_mask; struct vivid_dev *vivid_ctrl_svid_to_output_instance[MAX_MENU_ITEMS]; unsigned int vivid_ctrl_svid_to_output_index[MAX_MENU_ITEMS]; char *vivid_ctrl_svid_to_output_strings[MAX_MENU_ITEMS + 1] = { "Test Pattern Generator", "None" }; const struct v4l2_rect vivid_min_rect = { 0, 0, MIN_WIDTH, MIN_HEIGHT }; const struct v4l2_rect vivid_max_rect = { 0, 0, MAX_WIDTH * MAX_ZOOM, MAX_HEIGHT * MAX_ZOOM }; static const u8 vivid_hdmi_edid[256] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00, 0x22, 0x1a, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78, 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59, 0x45, 0x59, 0x81, 0x80, 0x81, 0x40, 0x90, 0x40, 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x08, 0xe8, 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58, 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18, 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x76, 0x69, 0x76, 0x69, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b, 0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f, 0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21, 0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x21, 0x00, 0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4, 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3, 0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d, 0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e, 0x1a, 0x36, 0x80, 0xa0, 0x70, 0x38, 0x1f, 0x40, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51, 0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, }; static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vivid_dev *dev = video_drvdata(file); strscpy(cap->driver, "vivid", sizeof(cap->driver)); strscpy(cap->card, "vivid", 
sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s-%03d", VIVID_MODULE_NAME, dev->inst); cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps | dev->vbi_cap_caps | dev->vbi_out_caps | dev->radio_rx_caps | dev->radio_tx_caps | dev->sdr_cap_caps | dev->meta_cap_caps | dev->meta_out_caps | dev->touch_cap_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int vidioc_s_hw_freq_seek(struct file *file, void *priv, const struct v4l2_hw_freq_seek *a) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_RADIO) return vivid_radio_rx_s_hw_freq_seek(file, priv, a); return -ENOTTY; } static int vidioc_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_RADIO) return vivid_radio_rx_enum_freq_bands(file, priv, band); if (vdev->vfl_type == VFL_TYPE_SDR) return vivid_sdr_enum_freq_bands(file, priv, band); return -ENOTTY; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_RADIO) return vivid_radio_rx_g_tuner(file, priv, vt); if (vdev->vfl_type == VFL_TYPE_SDR) return vivid_sdr_g_tuner(file, priv, vt); return vivid_video_g_tuner(file, priv, vt); } static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_RADIO) return vivid_radio_rx_s_tuner(file, priv, vt); if (vdev->vfl_type == VFL_TYPE_SDR) return vivid_sdr_s_tuner(file, priv, vt); return vivid_video_s_tuner(file, priv, vt); } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf) { struct vivid_dev *dev = video_drvdata(file); struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_RADIO) return vivid_radio_g_frequency(file, vdev->vfl_dir == VFL_DIR_RX ? &dev->radio_rx_freq : &dev->radio_tx_freq, vf); if (vdev->vfl_type == VFL_TYPE_SDR) return vivid_sdr_g_frequency(file, priv, vf); return vivid_video_g_frequency(file, priv, vf); } static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf) { struct vivid_dev *dev = video_drvdata(file); struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_RADIO) return vivid_radio_s_frequency(file, vdev->vfl_dir == VFL_DIR_RX ? 
&dev->radio_rx_freq : &dev->radio_tx_freq, vf); if (vdev->vfl_type == VFL_TYPE_SDR) return vivid_sdr_s_frequency(file, priv, vf); return vivid_video_s_frequency(file, priv, vf); } static int vidioc_overlay(struct file *file, void *priv, unsigned i) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return -ENOTTY; return vivid_vid_out_overlay(file, priv, i); } static int vidioc_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *a) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return -ENOTTY; return vivid_vid_out_g_fbuf(file, priv, a); } static int vidioc_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *a) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return -ENOTTY; return vivid_vid_out_s_fbuf(file, priv, a); } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_s_std(file, priv, id); return vivid_vid_out_s_std(file, priv, id); } static int vidioc_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_s_dv_timings(file, priv, timings); return vivid_vid_out_s_dv_timings(file, priv, timings); } static int vidioc_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_g_pixelaspect(file, priv, type, f); return vivid_vid_out_g_pixelaspect(file, priv, type, f); } static int vidioc_g_selection(struct file *file, void *priv, struct v4l2_selection *sel) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_g_selection(file, priv, sel); return vivid_vid_out_g_selection(file, priv, sel); } static int vidioc_s_selection(struct file *file, void *priv, struct v4l2_selection *sel) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_s_selection(file, priv, sel); return vivid_vid_out_s_selection(file, priv, sel); } static int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_parm_tch(file, priv, parm); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_g_parm(file, priv, parm); return vivid_vid_out_g_parm(file, priv, parm); } static int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *parm) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_s_parm(file, priv, parm); return -ENOTTY; } static int vidioc_log_status(struct file *file, void *priv) { struct vivid_dev *dev = video_drvdata(file); struct video_device *vdev = video_devdata(file); v4l2_ctrl_log_status(file, priv); if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_VIDEO) tpg_log_status(&dev->tpg); return 0; } static ssize_t vivid_radio_read(struct file *file, char __user *buf, size_t size, loff_t *offset) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_TX) return -EINVAL; return vivid_radio_rx_read(file, buf, size, offset); } static ssize_t vivid_radio_write(struct file *file, const char __user *buf, size_t size, loff_t *offset) { struct video_device *vdev = video_devdata(file); if 
(vdev->vfl_dir == VFL_DIR_RX) return -EINVAL; return vivid_radio_tx_write(file, buf, size, offset); } static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wait) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_dir == VFL_DIR_RX) return vivid_radio_rx_poll(file, wait); return vivid_radio_tx_poll(file, wait); } static int vivid_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_enum_input_tch(file, priv, inp); return vidioc_enum_input(file, priv, inp); } static int vivid_g_input(struct file *file, void *priv, unsigned int *i) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_input_tch(file, priv, i); return vidioc_g_input(file, priv, i); } static int vivid_s_input(struct file *file, void *priv, unsigned int i) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_s_input_tch(file, priv, i); return vidioc_s_input(file, priv, i); } static int vivid_enum_fmt_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_enum_fmt_tch(file, priv, f); return vivid_enum_fmt_vid(file, priv, f); } static int vivid_g_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch(file, priv, f); return vidioc_g_fmt_vid_cap(file, priv, f); } static int vivid_try_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch(file, priv, f); return vidioc_try_fmt_vid_cap(file, priv, f); } static int vivid_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch(file, priv, f); return vidioc_s_fmt_vid_cap(file, priv, f); } static int vivid_g_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch_mplane(file, priv, f); return vidioc_g_fmt_vid_cap_mplane(file, priv, f); } static int vivid_try_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch_mplane(file, priv, f); return vidioc_try_fmt_vid_cap_mplane(file, priv, f); } static int vivid_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); if (vdev->vfl_type == VFL_TYPE_TOUCH) return vivid_g_fmt_tch_mplane(file, priv, f); return vidioc_s_fmt_vid_cap_mplane(file, priv, f); } static bool vivid_is_in_use(bool valid, struct video_device *vdev) { unsigned long flags; bool res; if (!valid) return false; spin_lock_irqsave(&vdev->fh_lock, flags); res = !list_empty(&vdev->fh_list); spin_unlock_irqrestore(&vdev->fh_lock, flags); return res; } static bool vivid_is_last_user(struct vivid_dev *dev) { unsigned int uses = vivid_is_in_use(dev->has_vid_cap, &dev->vid_cap_dev) + vivid_is_in_use(dev->has_vid_out, &dev->vid_out_dev) + vivid_is_in_use(dev->has_vbi_cap, &dev->vbi_cap_dev) + vivid_is_in_use(dev->has_vbi_out, &dev->vbi_out_dev) + 
vivid_is_in_use(dev->has_radio_rx, &dev->radio_rx_dev) + vivid_is_in_use(dev->has_radio_tx, &dev->radio_tx_dev) + vivid_is_in_use(dev->has_sdr_cap, &dev->sdr_cap_dev) + vivid_is_in_use(dev->has_meta_cap, &dev->meta_cap_dev) + vivid_is_in_use(dev->has_meta_out, &dev->meta_out_dev) + vivid_is_in_use(dev->has_touch_cap, &dev->touch_cap_dev); return uses == 1; } static void vivid_reconnect(struct vivid_dev *dev) { if (dev->has_vid_cap) set_bit(V4L2_FL_REGISTERED, &dev->vid_cap_dev.flags); if (dev->has_vid_out) set_bit(V4L2_FL_REGISTERED, &dev->vid_out_dev.flags); if (dev->has_vbi_cap) set_bit(V4L2_FL_REGISTERED, &dev->vbi_cap_dev.flags); if (dev->has_vbi_out) set_bit(V4L2_FL_REGISTERED, &dev->vbi_out_dev.flags); if (dev->has_radio_rx) set_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags); if (dev->has_radio_tx) set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags); if (dev->has_sdr_cap) set_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags); if (dev->has_meta_cap) set_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags); if (dev->has_meta_out) set_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags); if (dev->has_touch_cap) set_bit(V4L2_FL_REGISTERED, &dev->touch_cap_dev.flags); dev->disconnect_error = false; } static int vivid_fop_release(struct file *file) { struct vivid_dev *dev = video_drvdata(file); struct video_device *vdev = video_devdata(file); mutex_lock(&dev->mutex); if (!no_error_inj && v4l2_fh_is_singular_file(file) && dev->disconnect_error && !video_is_registered(vdev) && vivid_is_last_user(dev)) { /* * I am the last user of this driver, and a disconnect * was forced (since this video_device is unregistered), * so re-register all video_device's again. */ v4l2_info(&dev->v4l2_dev, "reconnect\n"); vivid_reconnect(dev); } if (file_to_v4l2_fh(file) == dev->radio_rx_rds_owner) { dev->radio_rx_rds_last_block = 0; dev->radio_rx_rds_owner = NULL; } if (file_to_v4l2_fh(file) == dev->radio_tx_rds_owner) { dev->radio_tx_rds_last_block = 0; dev->radio_tx_rds_owner = NULL; } mutex_unlock(&dev->mutex); if (vdev->queue) return vb2_fop_release(file); return v4l2_fh_release(file); } static const struct v4l2_file_operations vivid_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vivid_fop_release, .read = vb2_fop_read, .write = vb2_fop_write, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, }; static const struct v4l2_file_operations vivid_radio_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vivid_fop_release, .read = vivid_radio_read, .write = vivid_radio_write, .poll = vivid_radio_poll, .unlocked_ioctl = video_ioctl2, }; static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct video_device *vdev = video_devdata(file); int r; /* * Sliced and raw VBI capture share the same queue so we must * change the type. */ if (p->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE || p->type == V4L2_BUF_TYPE_VBI_CAPTURE) { r = vb2_queue_change_type(vdev->queue, p->type); if (r) return r; } return vb2_ioctl_reqbufs(file, priv, p); } static int vidioc_create_bufs(struct file *file, void *priv, struct v4l2_create_buffers *p) { struct video_device *vdev = video_devdata(file); int r; /* * Sliced and raw VBI capture share the same queue so we must * change the type. 
*/ if (p->format.type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE || p->format.type == V4L2_BUF_TYPE_VBI_CAPTURE) { r = vb2_queue_change_type(vdev->queue, p->format.type); if (r) return r; } return vb2_ioctl_create_bufs(file, priv, p); } static const struct v4l2_ioctl_ops vivid_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vivid_enum_fmt_cap, .vidioc_g_fmt_vid_cap = vivid_g_fmt_cap, .vidioc_try_fmt_vid_cap = vivid_try_fmt_cap, .vidioc_s_fmt_vid_cap = vivid_s_fmt_cap, .vidioc_g_fmt_vid_cap_mplane = vivid_g_fmt_cap_mplane, .vidioc_try_fmt_vid_cap_mplane = vivid_try_fmt_cap_mplane, .vidioc_s_fmt_vid_cap_mplane = vivid_s_fmt_cap_mplane, .vidioc_enum_fmt_vid_out = vivid_enum_fmt_vid, .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out, .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out_mplane, .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane, .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out_mplane, .vidioc_g_selection = vidioc_g_selection, .vidioc_s_selection = vidioc_s_selection, .vidioc_g_pixelaspect = vidioc_g_pixelaspect, .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap, .vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap, .vidioc_try_fmt_sliced_vbi_cap = vidioc_try_fmt_sliced_vbi_cap, .vidioc_s_fmt_sliced_vbi_cap = vidioc_s_fmt_sliced_vbi_cap, .vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap, .vidioc_g_fmt_vbi_out = vidioc_g_fmt_vbi_out, .vidioc_try_fmt_vbi_out = vidioc_g_fmt_vbi_out, .vidioc_s_fmt_vbi_out = vidioc_s_fmt_vbi_out, .vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out, .vidioc_try_fmt_sliced_vbi_out = vidioc_try_fmt_sliced_vbi_out, .vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out, .vidioc_enum_fmt_sdr_cap = vidioc_enum_fmt_sdr_cap, .vidioc_g_fmt_sdr_cap = vidioc_g_fmt_sdr_cap, .vidioc_try_fmt_sdr_cap = vidioc_try_fmt_sdr_cap, .vidioc_s_fmt_sdr_cap = vidioc_s_fmt_sdr_cap, .vidioc_overlay = vidioc_overlay, .vidioc_enum_framesizes = vidioc_enum_framesizes, .vidioc_enum_frameintervals = vidioc_enum_frameintervals, .vidioc_g_parm = vidioc_g_parm, .vidioc_s_parm = vidioc_s_parm, .vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_out_overlay, .vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_out_overlay, .vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_out_overlay, .vidioc_g_fbuf = vidioc_g_fbuf, .vidioc_s_fbuf = vidioc_s_fbuf, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_create_bufs = vidioc_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_remove_bufs = vb2_ioctl_remove_bufs, .vidioc_enum_input = vivid_enum_input, .vidioc_g_input = vivid_g_input, .vidioc_s_input = vivid_s_input, .vidioc_s_audio = vidioc_s_audio, .vidioc_g_audio = vidioc_g_audio, .vidioc_enumaudio = vidioc_enumaudio, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_modulator = vidioc_s_modulator, .vidioc_g_modulator = vidioc_g_modulator, .vidioc_s_hw_freq_seek = vidioc_s_hw_freq_seek, .vidioc_enum_freq_bands = vidioc_enum_freq_bands, .vidioc_enum_output = vidioc_enum_output, .vidioc_g_output = vidioc_g_output, .vidioc_s_output = 
vidioc_s_output, .vidioc_s_audout = vidioc_s_audout, .vidioc_g_audout = vidioc_g_audout, .vidioc_enumaudout = vidioc_enumaudout, .vidioc_querystd = vidioc_querystd, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_s_dv_timings = vidioc_s_dv_timings, .vidioc_g_dv_timings = vidioc_g_dv_timings, .vidioc_query_dv_timings = vidioc_query_dv_timings, .vidioc_enum_dv_timings = vidioc_enum_dv_timings, .vidioc_dv_timings_cap = vidioc_dv_timings_cap, .vidioc_g_edid = vidioc_g_edid, .vidioc_s_edid = vidioc_s_edid, .vidioc_log_status = vidioc_log_status, .vidioc_subscribe_event = vidioc_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, .vidioc_enum_fmt_meta_cap = vidioc_enum_fmt_meta_cap, .vidioc_g_fmt_meta_cap = vidioc_g_fmt_meta_cap, .vidioc_s_fmt_meta_cap = vidioc_g_fmt_meta_cap, .vidioc_try_fmt_meta_cap = vidioc_g_fmt_meta_cap, .vidioc_enum_fmt_meta_out = vidioc_enum_fmt_meta_out, .vidioc_g_fmt_meta_out = vidioc_g_fmt_meta_out, .vidioc_s_fmt_meta_out = vidioc_g_fmt_meta_out, .vidioc_try_fmt_meta_out = vidioc_g_fmt_meta_out, }; /* ----------------------------------------------------------------- Initialization and module stuff ------------------------------------------------------------------*/ static void vivid_dev_release(struct v4l2_device *v4l2_dev) { struct vivid_dev *dev = container_of(v4l2_dev, struct vivid_dev, v4l2_dev); cancel_work_sync(&dev->update_hdmi_ctrl_work); vivid_free_controls(dev); v4l2_device_unregister(&dev->v4l2_dev); #ifdef CONFIG_MEDIA_CONTROLLER media_device_cleanup(&dev->mdev); #endif vfree(dev->scaled_line); vfree(dev->blended_line); vfree(dev->edid); tpg_free(&dev->tpg); kfree(dev->query_dv_timings_qmenu); kfree(dev->query_dv_timings_qmenu_strings); kfree(dev); } #ifdef CONFIG_MEDIA_CONTROLLER static int vivid_req_validate(struct media_request *req) { struct vivid_dev *dev = container_of(req->mdev, struct vivid_dev, mdev); if (dev->req_validate_error) { dev->req_validate_error = false; return -EINVAL; } return vb2_request_validate(req); } static const struct media_device_ops vivid_media_ops = { .req_validate = vivid_req_validate, .req_queue = vb2_request_queue, }; #endif static int vivid_create_queue(struct vivid_dev *dev, struct vb2_queue *q, u32 buf_type, unsigned int min_reqbufs_allocation, const struct vb2_ops *ops) { if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->multiplanar) buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; else if (buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT && dev->multiplanar) buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; else if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE && !dev->has_raw_vbi_cap) buf_type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; else if (buf_type == V4L2_BUF_TYPE_VBI_OUTPUT && !dev->has_raw_vbi_out) buf_type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT; q->type = buf_type; q->io_modes = VB2_MMAP | VB2_DMABUF; q->io_modes |= V4L2_TYPE_IS_OUTPUT(buf_type) ? VB2_WRITE : VB2_READ; /* * The maximum number of buffers is 32768 if PAGE_SHIFT == 12, * see also MAX_BUFFER_INDEX in videobuf2-core.c. It will be less if * PAGE_SHIFT > 12, but then max_num_buffers will be clamped by * videobuf2-core.c to MAX_BUFFER_INDEX. 
*/ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) q->max_num_buffers = MAX_VID_CAP_BUFFERS; if (buf_type == V4L2_BUF_TYPE_SDR_CAPTURE) q->max_num_buffers = 1024; if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE) q->max_num_buffers = 32768; if (allocators[dev->inst] != 1) q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = ops; q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops : &vb2_vmalloc_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_reqbufs_allocation = min_reqbufs_allocation; q->lock = &dev->mutex; q->dev = dev->v4l2_dev.dev; q->supports_requests = supports_requests[dev->inst]; q->requires_requests = supports_requests[dev->inst] >= 2; q->allow_cache_hints = (cache_hints[dev->inst] == 1); return vb2_queue_init(q); } static int vivid_detect_feature_set(struct vivid_dev *dev, int inst, unsigned node_type, bool *has_tuner, bool *has_modulator, int *ccs_cap, int *ccs_out, unsigned in_type_counter[4], unsigned out_type_counter[4]) { int i; /* do we use single- or multi-planar? */ dev->multiplanar = multiplanar[inst] > 1; v4l2_info(&dev->v4l2_dev, "using %splanar format API\n", dev->multiplanar ? "multi" : "single "); /* how many inputs do we have and of what type? */ dev->num_inputs = num_inputs[inst]; if (node_type & 0x20007) { if (dev->num_inputs < 1) dev->num_inputs = 1; } else { dev->num_inputs = 0; } if (dev->num_inputs >= MAX_INPUTS) dev->num_inputs = MAX_INPUTS; for (i = 0; i < dev->num_inputs; i++) { dev->input_type[i] = (input_types[inst] >> (i * 2)) & 0x3; dev->input_name_counter[i] = in_type_counter[dev->input_type[i]]++; } dev->has_audio_inputs = in_type_counter[TV] && in_type_counter[SVID]; if (in_type_counter[HDMI] == 16) { /* The CEC physical address only allows for max 15 inputs */ in_type_counter[HDMI]--; dev->num_inputs--; } dev->num_hdmi_inputs = in_type_counter[HDMI]; dev->num_svid_inputs = in_type_counter[SVID]; /* how many outputs do we have and of what type? */ dev->num_outputs = num_outputs[inst]; if (node_type & 0x40300) { if (dev->num_outputs < 1) dev->num_outputs = 1; } else { dev->num_outputs = 0; } if (dev->num_outputs >= MAX_OUTPUTS) dev->num_outputs = MAX_OUTPUTS; for (i = 0; i < dev->num_outputs; i++) { dev->output_type[i] = ((output_types[inst] >> i) & 1) ? HDMI : SVID; dev->output_name_counter[i] = out_type_counter[dev->output_type[i]]++; } dev->has_audio_outputs = out_type_counter[SVID]; if (out_type_counter[HDMI] == 16) { /* * The CEC physical address only allows for max 15 inputs, * so outputs are also limited to 15 to allow for easy * CEC output to input mapping. */ out_type_counter[HDMI]--; dev->num_outputs--; } dev->num_hdmi_outputs = out_type_counter[HDMI]; /* do we create a video capture device? */ dev->has_vid_cap = node_type & 0x0001; /* do we create a vbi capture device? 
*/ if (in_type_counter[TV] || in_type_counter[SVID]) { dev->has_raw_vbi_cap = node_type & 0x0004; dev->has_sliced_vbi_cap = node_type & 0x0008; dev->has_vbi_cap = dev->has_raw_vbi_cap | dev->has_sliced_vbi_cap; } /* do we create a meta capture device */ dev->has_meta_cap = node_type & 0x20000; /* sanity checks */ if ((in_type_counter[WEBCAM] || in_type_counter[HDMI]) && !dev->has_vid_cap && !dev->has_meta_cap) { v4l2_warn(&dev->v4l2_dev, "Webcam or HDMI input without video or metadata nodes\n"); return -EINVAL; } if ((in_type_counter[TV] || in_type_counter[SVID]) && !dev->has_vid_cap && !dev->has_vbi_cap && !dev->has_meta_cap) { v4l2_warn(&dev->v4l2_dev, "TV or S-Video input without video, VBI or metadata nodes\n"); return -EINVAL; } /* do we create a video output device? */ dev->has_vid_out = node_type & 0x0100; /* do we create a vbi output device? */ if (out_type_counter[SVID]) { dev->has_raw_vbi_out = node_type & 0x0400; dev->has_sliced_vbi_out = node_type & 0x0800; dev->has_vbi_out = dev->has_raw_vbi_out | dev->has_sliced_vbi_out; } /* do we create a metadata output device */ dev->has_meta_out = node_type & 0x40000; /* sanity checks */ if (out_type_counter[SVID] && !dev->has_vid_out && !dev->has_vbi_out && !dev->has_meta_out) { v4l2_warn(&dev->v4l2_dev, "S-Video output without video, VBI or metadata nodes\n"); return -EINVAL; } if (out_type_counter[HDMI] && !dev->has_vid_out && !dev->has_meta_out) { v4l2_warn(&dev->v4l2_dev, "HDMI output without video or metadata nodes\n"); return -EINVAL; } /* do we create a radio receiver device? */ dev->has_radio_rx = node_type & 0x0010; /* do we create a radio transmitter device? */ dev->has_radio_tx = node_type & 0x1000; /* do we create a software defined radio capture device? */ dev->has_sdr_cap = node_type & 0x0020; /* do we have a TV tuner? */ dev->has_tv_tuner = in_type_counter[TV]; /* do we have a tuner? */ *has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) || dev->has_radio_rx || dev->has_sdr_cap; /* do we have a modulator? */ *has_modulator = dev->has_radio_tx; #ifdef CONFIG_VIDEO_VIVID_OSD if (dev->has_vid_cap) /* do we have a framebuffer for overlay testing? */ dev->has_fb = node_type & 0x10000; #endif /* can we do crop/compose/scaling while capturing? */ if (no_error_inj && *ccs_cap == -1) *ccs_cap = 7; /* if ccs_cap == -1, then the user can select it using controls */ if (*ccs_cap != -1) { dev->has_crop_cap = *ccs_cap & 1; dev->has_compose_cap = *ccs_cap & 2; dev->has_scaler_cap = *ccs_cap & 4; v4l2_info(&dev->v4l2_dev, "Capture Crop: %c Compose: %c Scaler: %c\n", dev->has_crop_cap ? 'Y' : 'N', dev->has_compose_cap ? 'Y' : 'N', dev->has_scaler_cap ? 'Y' : 'N'); } /* can we do crop/compose/scaling with video output? */ if (no_error_inj && *ccs_out == -1) *ccs_out = 7; /* if ccs_out == -1, then the user can select it using controls */ if (*ccs_out != -1) { dev->has_crop_out = *ccs_out & 1; dev->has_compose_out = *ccs_out & 2; dev->has_scaler_out = *ccs_out & 4; v4l2_info(&dev->v4l2_dev, "Output Crop: %c Compose: %c Scaler: %c\n", dev->has_crop_out ? 'Y' : 'N', dev->has_compose_out ? 'Y' : 'N', dev->has_scaler_out ? 'Y' : 'N'); } /* do we create a touch capture device */ dev->has_touch_cap = node_type & 0x80000; return 0; } static void vivid_set_capabilities(struct vivid_dev *dev) { if (dev->has_vid_cap) { /* set up the capabilities of the video capture device */ dev->vid_cap_caps = dev->multiplanar ? 
V4L2_CAP_VIDEO_CAPTURE_MPLANE : V4L2_CAP_VIDEO_CAPTURE; dev->vid_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; if (dev->has_audio_inputs) dev->vid_cap_caps |= V4L2_CAP_AUDIO; if (dev->has_tv_tuner) dev->vid_cap_caps |= V4L2_CAP_TUNER; } if (dev->has_vid_out) { /* set up the capabilities of the video output device */ dev->vid_out_caps = dev->multiplanar ? V4L2_CAP_VIDEO_OUTPUT_MPLANE : V4L2_CAP_VIDEO_OUTPUT; if (dev->has_fb) dev->vid_out_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY; dev->vid_out_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; if (dev->has_audio_outputs) dev->vid_out_caps |= V4L2_CAP_AUDIO; } if (dev->has_vbi_cap) { /* set up the capabilities of the vbi capture device */ dev->vbi_cap_caps = (dev->has_raw_vbi_cap ? V4L2_CAP_VBI_CAPTURE : 0) | (dev->has_sliced_vbi_cap ? V4L2_CAP_SLICED_VBI_CAPTURE : 0); dev->vbi_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; if (dev->has_audio_inputs) dev->vbi_cap_caps |= V4L2_CAP_AUDIO; if (dev->has_tv_tuner) dev->vbi_cap_caps |= V4L2_CAP_TUNER; } if (dev->has_vbi_out) { /* set up the capabilities of the vbi output device */ dev->vbi_out_caps = (dev->has_raw_vbi_out ? V4L2_CAP_VBI_OUTPUT : 0) | (dev->has_sliced_vbi_out ? V4L2_CAP_SLICED_VBI_OUTPUT : 0); dev->vbi_out_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; if (dev->has_audio_outputs) dev->vbi_out_caps |= V4L2_CAP_AUDIO; } if (dev->has_sdr_cap) { /* set up the capabilities of the sdr capture device */ dev->sdr_cap_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER; dev->sdr_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; } /* set up the capabilities of the radio receiver device */ if (dev->has_radio_rx) dev->radio_rx_caps = V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE | V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER | V4L2_CAP_READWRITE; /* set up the capabilities of the radio transmitter device */ if (dev->has_radio_tx) dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR | V4L2_CAP_READWRITE; /* set up the capabilities of meta capture device */ if (dev->has_meta_cap) { dev->meta_cap_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; if (dev->has_audio_inputs) dev->meta_cap_caps |= V4L2_CAP_AUDIO; if (dev->has_tv_tuner) dev->meta_cap_caps |= V4L2_CAP_TUNER; } /* set up the capabilities of meta output device */ if (dev->has_meta_out) { dev->meta_out_caps = V4L2_CAP_META_OUTPUT | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; if (dev->has_audio_outputs) dev->meta_out_caps |= V4L2_CAP_AUDIO; } /* set up the capabilities of the touch capture device */ if (dev->has_touch_cap) { dev->touch_cap_caps = V4L2_CAP_TOUCH | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; dev->touch_cap_caps |= dev->multiplanar ? 
V4L2_CAP_VIDEO_CAPTURE_MPLANE : V4L2_CAP_VIDEO_CAPTURE; } } static void vivid_disable_unused_ioctls(struct vivid_dev *dev, bool has_tuner, bool has_modulator, unsigned in_type_counter[4], unsigned out_type_counter[4]) { /* disable invalid ioctls based on the feature set */ if (!dev->has_audio_inputs) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_AUDIO); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_AUDIO); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUMAUDIO); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_AUDIO); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_AUDIO); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_ENUMAUDIO); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_AUDIO); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_AUDIO); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_ENUMAUDIO); } if (!dev->has_audio_outputs) { v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_AUDOUT); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_AUDOUT); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUMAUDOUT); v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_AUDOUT); v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_AUDOUT); v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_ENUMAUDOUT); v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_AUDOUT); v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_AUDOUT); v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_ENUMAUDOUT); } if (!in_type_counter[TV] && !in_type_counter[SVID]) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_STD); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_STD); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUMSTD); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_QUERYSTD); } if (!out_type_counter[SVID]) { v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_STD); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_STD); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUMSTD); } if (!has_tuner && !has_modulator) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_FREQUENCY); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_FREQUENCY); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_FREQUENCY); } if (!has_tuner) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_TUNER); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_TUNER); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_TUNER); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_TUNER); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_TUNER); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_TUNER); } if (in_type_counter[HDMI] == 0) { v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_EDID); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_EDID); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_DV_TIMINGS_CAP); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUM_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_QUERY_DV_TIMINGS); } if (out_type_counter[HDMI] == 0) { v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_EDID); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_DV_TIMINGS_CAP); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_DV_TIMINGS); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_DV_TIMINGS); } if (!dev->has_fb) { v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FBUF); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FBUF); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_OVERLAY); } 
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_HW_FREQ_SEEK); v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_HW_FREQ_SEEK); v4l2_disable_ioctl(&dev->sdr_cap_dev, VIDIOC_S_HW_FREQ_SEEK); v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_HW_FREQ_SEEK); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FREQUENCY); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMESIZES); v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMEINTERVALS); v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY); v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_FREQUENCY); v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_FREQUENCY); v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_S_PARM); v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMESIZES); v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMEINTERVALS); } static int vivid_init_dv_timings(struct vivid_dev *dev) { int i; while (v4l2_dv_timings_presets[dev->query_dv_timings_size].bt.width) dev->query_dv_timings_size++; /* * Create a char pointer array that points to the names of all the * preset timings */ dev->query_dv_timings_qmenu = kmalloc_array(dev->query_dv_timings_size, sizeof(char *), GFP_KERNEL); /* * Create a string array containing the names of all the preset * timings. Each name is max 31 chars long (+ terminating 0). */ dev->query_dv_timings_qmenu_strings = kmalloc_array(dev->query_dv_timings_size, 32, GFP_KERNEL); if (!dev->query_dv_timings_qmenu || !dev->query_dv_timings_qmenu_strings) return -ENOMEM; for (i = 0; i < dev->query_dv_timings_size; i++) { const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt; char *p = dev->query_dv_timings_qmenu_strings + i * 32; u32 htot, vtot; dev->query_dv_timings_qmenu[i] = p; htot = V4L2_DV_BT_FRAME_WIDTH(bt); vtot = V4L2_DV_BT_FRAME_HEIGHT(bt); snprintf(p, 32, "%ux%u%s%u", bt->width, bt->height, bt->interlaced ? 
"i" : "p", (u32)bt->pixelclock / (htot * vtot)); } return 0; } static int vivid_create_queues(struct vivid_dev *dev) { int ret; /* start creating the vb2 queues */ if (dev->has_vid_cap) { /* initialize vid_cap queue */ ret = vivid_create_queue(dev, &dev->vb_vid_cap_q, V4L2_BUF_TYPE_VIDEO_CAPTURE, 2, &vivid_vid_cap_qops); if (ret) return ret; } if (dev->has_vid_out) { /* initialize vid_out queue */ ret = vivid_create_queue(dev, &dev->vb_vid_out_q, V4L2_BUF_TYPE_VIDEO_OUTPUT, 2, &vivid_vid_out_qops); if (ret) return ret; } if (dev->has_vbi_cap) { /* initialize vbi_cap queue */ ret = vivid_create_queue(dev, &dev->vb_vbi_cap_q, V4L2_BUF_TYPE_VBI_CAPTURE, 2, &vivid_vbi_cap_qops); if (ret) return ret; } if (dev->has_vbi_out) { /* initialize vbi_out queue */ ret = vivid_create_queue(dev, &dev->vb_vbi_out_q, V4L2_BUF_TYPE_VBI_OUTPUT, 2, &vivid_vbi_out_qops); if (ret) return ret; } if (dev->has_sdr_cap) { /* initialize sdr_cap queue */ ret = vivid_create_queue(dev, &dev->vb_sdr_cap_q, V4L2_BUF_TYPE_SDR_CAPTURE, 8, &vivid_sdr_cap_qops); if (ret) return ret; } if (dev->has_meta_cap) { /* initialize meta_cap queue */ ret = vivid_create_queue(dev, &dev->vb_meta_cap_q, V4L2_BUF_TYPE_META_CAPTURE, 2, &vivid_meta_cap_qops); if (ret) return ret; } if (dev->has_meta_out) { /* initialize meta_out queue */ ret = vivid_create_queue(dev, &dev->vb_meta_out_q, V4L2_BUF_TYPE_META_OUTPUT, 2, &vivid_meta_out_qops); if (ret) return ret; } if (dev->has_touch_cap) { /* initialize touch_cap queue */ ret = vivid_create_queue(dev, &dev->vb_touch_cap_q, V4L2_BUF_TYPE_VIDEO_CAPTURE, 2, &vivid_touch_cap_qops); if (ret) return ret; } if (dev->has_fb) { /* Create framebuffer for testing output overlay */ ret = vivid_fb_init(dev); if (ret) return ret; } return 0; } static int vivid_create_devnodes(struct platform_device *pdev, struct vivid_dev *dev, int inst, v4l2_std_id tvnorms_cap, v4l2_std_id tvnorms_out, unsigned in_type_counter[4], unsigned out_type_counter[4]) { struct video_device *vfd; int ret; if (dev->has_vid_cap) { vfd = &dev->vid_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-vid-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vid_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_vid_cap_q; vfd->tvnorms = tvnorms_cap; /* * Provide a mutex to v4l2 core. It will be used to protect * all fops and v4l2 ioctls. 
*/ vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->vid_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_cap_pad); if (ret) return ret; #endif #ifdef CONFIG_VIDEO_VIVID_CEC if (in_type_counter[HDMI]) { ret = cec_register_adapter(dev->cec_rx_adap, &pdev->dev); if (ret < 0) { cec_delete_adapter(dev->cec_rx_adap); dev->cec_rx_adap = NULL; return ret; } cec_s_phys_addr(dev->cec_rx_adap, 0, false); v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI input\n", dev_name(&dev->cec_rx_adap->devnode.dev)); } #endif ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_vid_out) { #ifdef CONFIG_VIDEO_VIVID_CEC int i; #endif vfd = &dev->vid_out_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-vid-out", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vid_out_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_vid_out_q; vfd->tvnorms = tvnorms_out; /* * Provide a mutex to v4l2 core. It will be used to protect * all fops and v4l2 ioctls. */ vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->vid_out_pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_out_pad); if (ret) return ret; #endif #ifdef CONFIG_VIDEO_VIVID_CEC for (i = 0; i < dev->num_hdmi_outputs; i++) { ret = cec_register_adapter(dev->cec_tx_adap[i], &pdev->dev); if (ret < 0) { for (; i >= 0; i--) { cec_delete_adapter(dev->cec_tx_adap[i]); dev->cec_tx_adap[i] = NULL; } return ret; } v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n", dev_name(&dev->cec_tx_adap[i]->devnode.dev), i); } #endif ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_out_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_vbi_cap) { vfd = &dev->vbi_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-vbi-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vbi_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_vbi_cap_q; vfd->lock = &dev->mutex; vfd->tvnorms = tvnorms_cap; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->vbi_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_cap_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s, supports %s VBI\n", video_device_node_name(vfd), (dev->has_raw_vbi_cap && dev->has_sliced_vbi_cap) ? "raw and sliced" : (dev->has_raw_vbi_cap ? 
"raw" : "sliced")); } if (dev->has_vbi_out) { vfd = &dev->vbi_out_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-vbi-out", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->vbi_out_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_vbi_out_q; vfd->lock = &dev->mutex; vfd->tvnorms = tvnorms_out; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->vbi_out_pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_out_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s, supports %s VBI\n", video_device_node_name(vfd), (dev->has_raw_vbi_out && dev->has_sliced_vbi_out) ? "raw and sliced" : (dev->has_raw_vbi_out ? "raw" : "sliced")); } if (dev->has_sdr_cap) { vfd = &dev->sdr_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-sdr-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->sdr_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_sdr_cap_q; vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->sdr_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->sdr_cap_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_radio_rx) { vfd = &dev->radio_rx_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-rad-rx", inst); vfd->fops = &vivid_radio_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->radio_rx_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_rx_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 receiver device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_radio_tx) { vfd = &dev->radio_tx_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-rad-tx", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_radio_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->radio_tx_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_tx_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 transmitter device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_meta_cap) { vfd = &dev->meta_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-meta-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->meta_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_meta_cap_q; vfd->lock = &dev->mutex; vfd->tvnorms = tvnorms_cap; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->meta_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->meta_cap_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_VIDEO, meta_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 metadata capture device registered as 
%s\n", video_device_node_name(vfd)); } if (dev->has_meta_out) { vfd = &dev->meta_out_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-meta-out", inst); vfd->vfl_dir = VFL_DIR_TX; vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->meta_out_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_meta_out_q; vfd->lock = &dev->mutex; vfd->tvnorms = tvnorms_out; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->meta_out_pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&vfd->entity, 1, &dev->meta_out_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_VIDEO, meta_out_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 metadata output device registered as %s\n", video_device_node_name(vfd)); } if (dev->has_touch_cap) { vfd = &dev->touch_cap_dev; snprintf(vfd->name, sizeof(vfd->name), "vivid-%03d-touch-cap", inst); vfd->fops = &vivid_fops; vfd->ioctl_ops = &vivid_ioctl_ops; vfd->device_caps = dev->touch_cap_caps; vfd->release = video_device_release_empty; vfd->v4l2_dev = &dev->v4l2_dev; vfd->queue = &dev->vb_touch_cap_q; vfd->tvnorms = tvnorms_cap; vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); #ifdef CONFIG_MEDIA_CONTROLLER dev->touch_cap_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &dev->touch_cap_pad); if (ret) return ret; #endif ret = video_register_device(vfd, VFL_TYPE_TOUCH, touch_cap_nr[inst]); if (ret < 0) return ret; v4l2_info(&dev->v4l2_dev, "V4L2 touch capture device registered as %s\n", video_device_node_name(vfd)); } #ifdef CONFIG_MEDIA_CONTROLLER /* Register the media device */ ret = media_device_register(&dev->mdev); if (ret) { dev_err(dev->mdev.dev, "media device register failed (err=%d)\n", ret); return ret; } #endif return 0; } static void update_hdmi_ctrls_work_handler(struct work_struct *work) { u64 skip_mask; u64 update_mask; spin_lock(&hdmi_output_skip_mask_lock); skip_mask = hdmi_to_output_menu_skip_mask; update_mask = hdmi_input_update_outputs_mask; hdmi_input_update_outputs_mask = 0; spin_unlock(&hdmi_output_skip_mask_lock); for (int i = 0; i < n_devs && vivid_devs[i]; i++) { if (update_mask & (1 << i)) vivid_update_connected_outputs(vivid_devs[i]); for (int j = 0; j < vivid_devs[i]->num_hdmi_inputs; j++) { struct v4l2_ctrl *c = vivid_devs[i]->ctrl_hdmi_to_output[j]; v4l2_ctrl_modify_range(c, c->minimum, c->maximum, skip_mask & ~(1ULL << c->cur.val), c->default_value); } } } static void update_svid_ctrls_work_handler(struct work_struct *work) { u64 skip_mask; spin_lock(&svid_output_skip_mask_lock); skip_mask = svid_to_output_menu_skip_mask; spin_unlock(&svid_output_skip_mask_lock); for (int i = 0; i < n_devs && vivid_devs[i]; i++) { for (int j = 0; j < vivid_devs[i]->num_svid_inputs; j++) { struct v4l2_ctrl *c = vivid_devs[i]->ctrl_svid_to_output[j]; v4l2_ctrl_modify_range(c, c->minimum, c->maximum, skip_mask & ~(1ULL << c->cur.val), c->default_value); } } } static int vivid_create_instance(struct platform_device *pdev, int inst) { static const struct v4l2_dv_timings def_dv_timings = V4L2_DV_BT_CEA_1280X720P60; unsigned in_type_counter[4] = { 0, 0, 0, 0 }; unsigned out_type_counter[4] = { 0, 0, 0, 0 }; int ccs_cap = ccs_cap_mode[inst]; int ccs_out = ccs_out_mode[inst]; bool has_tuner; bool has_modulator; struct vivid_dev *dev; unsigned node_type = node_types[inst]; v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0; int ret; int i; /* allocate main vivid state structure */ dev = 
kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->inst = inst; #ifdef CONFIG_MEDIA_CONTROLLER dev->v4l2_dev.mdev = &dev->mdev; /* Initialize media device */ strscpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model)); snprintf(dev->mdev.bus_info, sizeof(dev->mdev.bus_info), "platform:%s-%03d", VIVID_MODULE_NAME, inst); dev->mdev.dev = &pdev->dev; media_device_init(&dev->mdev); dev->mdev.ops = &vivid_media_ops; #endif /* register v4l2_device */ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s-%03d", VIVID_MODULE_NAME, inst); ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) { kfree(dev); return ret; } dev->v4l2_dev.release = vivid_dev_release; ret = vivid_detect_feature_set(dev, inst, node_type, &has_tuner, &has_modulator, &ccs_cap, &ccs_out, in_type_counter, out_type_counter); if (ret) goto free_dev; vivid_set_capabilities(dev); ret = -ENOMEM; /* initialize the test pattern generator */ tpg_init(&dev->tpg, 640, 360); if (tpg_alloc(&dev->tpg, array_size(MAX_WIDTH, MAX_ZOOM))) goto free_dev; dev->scaled_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM)); if (!dev->scaled_line) goto free_dev; dev->blended_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM)); if (!dev->blended_line) goto free_dev; /* load the edid */ dev->edid = vmalloc(array_size(256, 128)); if (!dev->edid) goto free_dev; ret = vivid_init_dv_timings(dev); if (ret < 0) goto free_dev; vivid_disable_unused_ioctls(dev, has_tuner, has_modulator, in_type_counter, out_type_counter); /* configure internal data */ dev->fmt_cap = &vivid_formats[0]; dev->fmt_out = &vivid_formats[0]; if (!dev->multiplanar) vivid_formats[0].data_offset[0] = 0; dev->webcam_size_idx = 1; dev->webcam_ival_idx = 3; tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc); dev->std_out = V4L2_STD_PAL; if (dev->input_type[0] == TV || dev->input_type[0] == SVID) tvnorms_cap = V4L2_STD_ALL; if (dev->output_type[0] == SVID) tvnorms_out = V4L2_STD_ALL; for (i = 0; i < MAX_INPUTS; i++) { dev->dv_timings_cap[i] = def_dv_timings; dev->std_cap[i] = V4L2_STD_PAL; } dev->dv_timings_out = def_dv_timings; dev->tv_freq = 2804 /* 175.25 * 16 */; dev->tv_audmode = V4L2_TUNER_MODE_STEREO; dev->tv_field_cap = V4L2_FIELD_INTERLACED; dev->tv_field_out = V4L2_FIELD_INTERLACED; dev->radio_rx_freq = 95000 * 16; dev->radio_rx_audmode = V4L2_TUNER_MODE_STEREO; if (dev->has_radio_tx) { dev->radio_tx_freq = 95500 * 16; dev->radio_rds_loop = false; } dev->radio_tx_subchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_RDS; dev->sdr_adc_freq = 300000; dev->sdr_fm_freq = 50000000; dev->sdr_pixelformat = V4L2_SDR_FMT_CU8; dev->sdr_buffersize = SDR_CAP_SAMPLES_PER_BUF * 2; dev->edid_max_blocks = dev->edid_blocks = 2; memcpy(dev->edid, vivid_hdmi_edid, sizeof(vivid_hdmi_edid)); dev->radio_rds_init_time = ktime_get(); INIT_WORK(&dev->update_hdmi_ctrl_work, update_hdmi_ctrls_work_handler); INIT_WORK(&dev->update_svid_ctrl_work, update_svid_ctrls_work_handler); for (int j = 0, k = 0; j < dev->num_inputs; ++j) if (dev->input_type[j] == HDMI) dev->hdmi_index_to_input_index[k++] = j; for (int j = 0, k = 0; j < dev->num_outputs; ++j) if (dev->output_type[j] == HDMI) { dev->output_to_iface_index[j] = k; dev->hdmi_index_to_output_index[k++] = j; } for (int j = 0, k = 0; j < dev->num_inputs; ++j) if (dev->input_type[j] == SVID) dev->svid_index_to_input_index[k++] = j; for (int j = 0, k = 0; j < dev->num_outputs; ++j) if (dev->output_type[j] == SVID) dev->output_to_iface_index[j] = k++; /* create all controls */ ret = vivid_create_controls(dev, ccs_cap == -1, ccs_out == -1, 
no_error_inj, in_type_counter[TV] || in_type_counter[SVID] || out_type_counter[SVID], in_type_counter[HDMI] || out_type_counter[HDMI]); if (ret) goto unreg_dev; /* enable/disable interface specific controls */ if (dev->num_inputs && dev->input_type[0] != HDMI) { v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, false); v4l2_ctrl_activate(dev->ctrl_dv_timings, false); } else if (dev->num_inputs && dev->input_type[0] == HDMI) { v4l2_ctrl_activate(dev->ctrl_std_signal_mode, false); v4l2_ctrl_activate(dev->ctrl_standard, false); } /* * update the capture and output formats to do a proper initial * configuration. */ vivid_update_format_cap(dev, false); vivid_update_format_out(dev); /* update touch configuration */ dev->timeperframe_tch_cap.numerator = 1; dev->timeperframe_tch_cap.denominator = 10; vivid_set_touch(dev, 0); /* initialize locks */ spin_lock_init(&dev->slock); mutex_init(&dev->mutex); /* init dma queues */ INIT_LIST_HEAD(&dev->vid_cap_active); INIT_LIST_HEAD(&dev->vid_out_active); INIT_LIST_HEAD(&dev->vbi_cap_active); INIT_LIST_HEAD(&dev->vbi_out_active); INIT_LIST_HEAD(&dev->sdr_cap_active); INIT_LIST_HEAD(&dev->meta_cap_active); INIT_LIST_HEAD(&dev->meta_out_active); INIT_LIST_HEAD(&dev->touch_cap_active); spin_lock_init(&dev->cec_xfers_slock); if (allocators[inst] == 1) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ret = vivid_create_queues(dev); if (ret) goto unreg_dev; #ifdef CONFIG_VIDEO_VIVID_CEC if (dev->has_vid_cap && in_type_counter[HDMI]) { struct cec_adapter *adap; adap = vivid_cec_alloc_adap(dev, 0, false); ret = PTR_ERR_OR_ZERO(adap); if (ret < 0) goto unreg_dev; dev->cec_rx_adap = adap; } if (dev->has_vid_out) { int j; for (i = j = 0; i < dev->num_outputs; i++) { struct cec_adapter *adap; if (dev->output_type[i] != HDMI) continue; adap = vivid_cec_alloc_adap(dev, j, true); ret = PTR_ERR_OR_ZERO(adap); if (ret < 0) { while (j--) cec_delete_adapter(dev->cec_tx_adap[j]); goto unreg_dev; } dev->cec_tx_adap[j++] = adap; } } if (dev->cec_rx_adap || dev->num_hdmi_outputs) { init_waitqueue_head(&dev->kthread_waitq_cec); dev->kthread_cec = kthread_run(vivid_cec_bus_thread, dev, "vivid_cec-%s", dev->v4l2_dev.name); if (IS_ERR(dev->kthread_cec)) { ret = PTR_ERR(dev->kthread_cec); dev->kthread_cec = NULL; v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); goto unreg_dev; } } #endif v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_out); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_cap); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_out); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out); v4l2_ctrl_handler_setup(&dev->ctrl_hdl_touch_cap); /* finally start creating the device nodes */ ret = vivid_create_devnodes(pdev, dev, inst, tvnorms_cap, tvnorms_out, in_type_counter, out_type_counter); if (ret) goto unreg_dev; /* Now that everything is fine, let's add it to device list */ vivid_devs[inst] = dev; return 0; unreg_dev: vb2_video_unregister_device(&dev->touch_cap_dev); vb2_video_unregister_device(&dev->meta_out_dev); vb2_video_unregister_device(&dev->meta_cap_dev); video_unregister_device(&dev->radio_tx_dev); video_unregister_device(&dev->radio_rx_dev); vb2_video_unregister_device(&dev->sdr_cap_dev); vb2_video_unregister_device(&dev->vbi_out_dev); vb2_video_unregister_device(&dev->vbi_cap_dev); 
vb2_video_unregister_device(&dev->vid_out_dev); vb2_video_unregister_device(&dev->vid_cap_dev); cec_unregister_adapter(dev->cec_rx_adap); for (i = 0; i < MAX_HDMI_OUTPUTS; i++) cec_unregister_adapter(dev->cec_tx_adap[i]); if (dev->kthread_cec) kthread_stop(dev->kthread_cec); free_dev: v4l2_device_put(&dev->v4l2_dev); return ret; } /* This routine allocates from 1 to n_devs virtual drivers. The real maximum number of virtual drivers will depend on how many drivers will succeed. This is limited to the maximum number of devices that videodev supports, which is equal to VIDEO_NUM_DEVICES. */ static int vivid_probe(struct platform_device *pdev) { const struct font_desc *font = find_font("VGA8x16"); int ret = 0, i; if (font == NULL) { pr_err("vivid: could not find font\n"); return -ENODEV; } tpg_set_font(font->data); n_devs = clamp_t(unsigned, n_devs, 1, VIVID_MAX_DEVS); for (i = 0; i < n_devs; i++) { ret = vivid_create_instance(pdev, i); if (ret) { /* If some instantiations succeeded, keep driver */ if (i) ret = 0; break; } } if (ret < 0) { pr_err("vivid: error %d while loading driver\n", ret); return ret; } /* n_devs will reflect the actual number of allocated devices */ n_devs = i; /* Determine qmenu items actually in use */ int hdmi_count = FIXED_MENU_ITEMS; int svid_count = FIXED_MENU_ITEMS; for (int i = 0; i < n_devs; i++) { struct vivid_dev *dev = vivid_devs[i]; if (!dev->has_vid_out) continue; for (int j = 0; j < dev->num_outputs && hdmi_count < MAX_MENU_ITEMS; ++j) { if (dev->output_type[j] == HDMI) { vivid_ctrl_hdmi_to_output_instance[hdmi_count] = vivid_devs[i]; vivid_ctrl_hdmi_to_output_index[hdmi_count++] = j; } } for (int j = 0; j < dev->num_outputs && svid_count < MAX_MENU_ITEMS; ++j) { if (dev->output_type[j] == SVID) { vivid_ctrl_svid_to_output_instance[svid_count] = vivid_devs[i]; vivid_ctrl_svid_to_output_index[svid_count++] = j; } } } hdmi_count = min(hdmi_count, MAX_MENU_ITEMS); svid_count = min(svid_count, MAX_MENU_ITEMS); for (int i = 0; i < n_devs; i++) { for (int j = 0; j < vivid_devs[i]->num_hdmi_inputs; j++) { struct v4l2_ctrl *c = vivid_devs[i]->ctrl_hdmi_to_output[j]; v4l2_ctrl_modify_range(c, c->minimum, hdmi_count - 1, 0, c->default_value); } for (int j = 0; j < vivid_devs[i]->num_svid_inputs; j++) { struct v4l2_ctrl *c = vivid_devs[i]->ctrl_svid_to_output[j]; v4l2_ctrl_modify_range(c, c->minimum, svid_count - 1, 0, c->default_value); } } return ret; } static void vivid_remove(struct platform_device *pdev) { struct vivid_dev *dev; unsigned int i, j; for (i = 0; i < n_devs; i++) { dev = vivid_devs[i]; if (!dev) continue; if (dev->disconnect_error) vivid_reconnect(dev); #ifdef CONFIG_MEDIA_CONTROLLER media_device_unregister(&dev->mdev); #endif if (dev->has_vid_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vid_cap_dev)); vb2_video_unregister_device(&dev->vid_cap_dev); } if (dev->has_vid_out) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vid_out_dev)); vb2_video_unregister_device(&dev->vid_out_dev); } if (dev->has_vbi_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vbi_cap_dev)); vb2_video_unregister_device(&dev->vbi_cap_dev); } if (dev->has_vbi_out) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vbi_out_dev)); vb2_video_unregister_device(&dev->vbi_out_dev); } if (dev->has_sdr_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->sdr_cap_dev)); vb2_video_unregister_device(&dev->sdr_cap_dev); } if 
(dev->has_radio_rx) {
			v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
				  video_device_node_name(&dev->radio_rx_dev));
			video_unregister_device(&dev->radio_rx_dev);
		}
		if (dev->has_radio_tx) {
			v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
				  video_device_node_name(&dev->radio_tx_dev));
			video_unregister_device(&dev->radio_tx_dev);
		}
		if (dev->has_fb)
			vivid_fb_deinit(dev);
		if (dev->has_meta_cap) {
			v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
				  video_device_node_name(&dev->meta_cap_dev));
			vb2_video_unregister_device(&dev->meta_cap_dev);
		}
		if (dev->has_meta_out) {
			v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
				  video_device_node_name(&dev->meta_out_dev));
			vb2_video_unregister_device(&dev->meta_out_dev);
		}
		if (dev->has_touch_cap) {
			v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
				  video_device_node_name(&dev->touch_cap_dev));
			vb2_video_unregister_device(&dev->touch_cap_dev);
		}
		cec_unregister_adapter(dev->cec_rx_adap);
		for (j = 0; j < MAX_HDMI_OUTPUTS; j++)
			cec_unregister_adapter(dev->cec_tx_adap[j]);
		if (dev->kthread_cec)
			kthread_stop(dev->kthread_cec);
		v4l2_device_put(&dev->v4l2_dev);
		vivid_devs[i] = NULL;
	}
}

static void vivid_pdev_release(struct device *dev)
{
}

static struct platform_device vivid_pdev = {
	.name		= "vivid",
	.dev.release	= vivid_pdev_release,
};

static struct platform_driver vivid_pdrv = {
	.probe		= vivid_probe,
	.remove		= vivid_remove,
	.driver		= {
		.name	= "vivid",
	},
};

static int __init vivid_init(void)
{
	int hdmi_count = FIXED_MENU_ITEMS;
	int svid_count = FIXED_MENU_ITEMS;
	int ret = -ENOMEM;
	unsigned int ndevs;

	/* Sanity check, prevent insane number of vivid instances */
	if (n_devs > 64)
		n_devs = 64;

	ndevs = clamp_t(unsigned int, n_devs, 1, VIVID_MAX_DEVS);
	for (unsigned int i = 0; i < ndevs; i++) {
		if (!(node_types[i] & (1 << 8)))
			continue;

		unsigned int n_outputs = min(num_outputs[i], MAX_OUTPUTS);

		for (u8 j = 0, k = 0;
		     j < n_outputs && hdmi_count < MAX_MENU_ITEMS &&
		     k < MAX_HDMI_OUTPUTS; ++j) {
			if (output_types[i] & BIT(j)) {
				vivid_ctrl_hdmi_to_output_strings[hdmi_count] =
					kmalloc(MAX_STRING_LENGTH, GFP_KERNEL);
				if (!vivid_ctrl_hdmi_to_output_strings[hdmi_count])
					goto free_output_strings;
				snprintf(vivid_ctrl_hdmi_to_output_strings[hdmi_count],
					 MAX_STRING_LENGTH, "Output HDMI %03d-%d",
					 i & 0xff, k);
				k++;
				hdmi_count++;
			}
		}
		for (u8 j = 0, k = 0;
		     j < n_outputs && svid_count < MAX_MENU_ITEMS; ++j) {
			if (!(output_types[i] & BIT(j))) {
				vivid_ctrl_svid_to_output_strings[svid_count] =
					kmalloc(MAX_STRING_LENGTH, GFP_KERNEL);
				if (!vivid_ctrl_svid_to_output_strings[svid_count])
					goto free_output_strings;
				snprintf(vivid_ctrl_svid_to_output_strings[svid_count],
					 MAX_STRING_LENGTH, "Output S-Video %03d-%d",
					 i & 0xff, k);
				k++;
				svid_count++;
			}
		}
	}

	ret = platform_device_register(&vivid_pdev);
	if (ret)
		goto free_output_strings;

	ret = platform_driver_register(&vivid_pdrv);
	if (ret)
		goto unreg_device;

	/* Initialize workqueue before module is loaded */
	update_hdmi_ctrls_workqueue = create_workqueue("update_hdmi_ctrls_wq");
	if (!update_hdmi_ctrls_workqueue) {
		ret = -ENOMEM;
		goto unreg_driver;
	}

	update_svid_ctrls_workqueue = create_workqueue("update_svid_ctrls_wq");
	if (!update_svid_ctrls_workqueue) {
		ret = -ENOMEM;
		goto destroy_hdmi_wq;
	}
	return ret;

destroy_hdmi_wq:
	destroy_workqueue(update_hdmi_ctrls_workqueue);
unreg_driver:
	platform_driver_unregister(&vivid_pdrv);
unreg_device:
	platform_device_unregister(&vivid_pdev);
free_output_strings:
	for (int i = FIXED_MENU_ITEMS; i < MAX_MENU_ITEMS; i++) {
		kfree(vivid_ctrl_hdmi_to_output_strings[i]);
		kfree(vivid_ctrl_svid_to_output_strings[i]);
	}
	return ret;
}

static void __exit vivid_exit(void)
{
	for (int i = FIXED_MENU_ITEMS; i < MAX_MENU_ITEMS; i++) {
		kfree(vivid_ctrl_hdmi_to_output_strings[i]);
		kfree(vivid_ctrl_svid_to_output_strings[i]);
	}
	destroy_workqueue(update_svid_ctrls_workqueue);
	destroy_workqueue(update_hdmi_ctrls_workqueue);
	platform_driver_unregister(&vivid_pdrv);
	platform_device_unregister(&vivid_pdev);
}

module_init(vivid_init);
module_exit(vivid_exit);
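/*
 * A minimal, stand-alone user-space sketch (not part of the driver itself)
 * showing how the capability data filled in by vidioc_querycap() above can be
 * inspected once a vivid instance has registered its video nodes. The path
 * /dev/video0 is an assumption: the actual node number depends on the
 * vid_cap_nr module parameter and on other video devices in the system.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR);	/* assumed vivid capture node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
		/* bus_info is "platform:vivid-000" for the first instance */
		printf("driver=%s card=%s bus=%s\n", (char *)cap.driver,
		       (char *)cap.card, (char *)cap.bus_info);
		if (cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)
			printf("single-planar video capture is supported\n");
	}
	close(fd);
	return 0;
}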
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) International Business Machines Corp., 2006 * Copyright (c) Nokia Corporation, 2006, 2007 * * Author: Artem Bityutskiy (Битюцкий Артём) */ #ifndef __UBI_UBI_H__ #define __UBI_UBI_H__ #include <linux/types.h> #include <linux/list.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <linux/notifier.h> #include <linux/mtd/mtd.h> #include <linux/mtd/ubi.h> #include <linux/pgtable.h> #include "ubi-media.h" /* Maximum number of supported UBI devices */ #define UBI_MAX_DEVICES 32 /* UBI name used for character devices, sysfs, etc */ #define UBI_NAME_STR "ubi" struct ubi_device; /* Normal UBI messages */ __printf(2, 3) void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...); /* UBI warning messages */ __printf(2, 3) void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...); /* UBI error messages */ __printf(2, 3) void ubi_err(const struct ubi_device *ubi, const char *fmt, ...); /* Background thread name pattern */ #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" /* * This marker in the EBA table means that the LEB is un-mapped. * NOTE! It has to have the same value as %UBI_ALL. */ #define UBI_LEB_UNMAPPED -1 /* * In case of errors, UBI tries to repeat the operation several times before * returning an error. The constant below defines how many times UBI retries. */ #define UBI_IO_RETRIES 3 /* * Length of the protection queue. The length is effectively equivalent to the * number of (global) erase cycles PEBs are protected from the wear-leveling * worker.
*/ #define UBI_PROT_QUEUE_LEN 10 /* The volume ID/LEB number/erase counter is unknown */ #define UBI_UNKNOWN -1 /* * The UBI debugfs directory name pattern and maximum name length (3 for "ubi" * + 2 for the number plus 1 for the trailing zero byte). */ #define UBI_DFS_DIR_NAME "ubi%d" #define UBI_DFS_DIR_LEN (3 + 2 + 1) /* Number of physical eraseblocks reserved for atomic LEB change operation */ #define EBA_RESERVED_PEBS 1 /* * Error codes returned by the I/O sub-system. * * UBI_IO_FF: the read region of flash contains only 0xFFs * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data * integrity error reported by the MTD driver * (uncorrectable ECC error in case of NAND) * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC) * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a * data integrity error reported by the MTD driver * (uncorrectable ECC error in case of NAND) * UBI_IO_BITFLIPS: bit-flips were detected and corrected * * Note, it is probably better to have bit-flip and ebadmsg as flags which can * be or'ed with other error codes. But this is a big change because there are * many callers, so it is not worth the risk of introducing a bug. */ enum { UBI_IO_FF = 1, UBI_IO_FF_BITFLIPS, UBI_IO_BAD_HDR, UBI_IO_BAD_HDR_EBADMSG, UBI_IO_BITFLIPS, }; /* * Return codes of the 'ubi_eba_copy_leb()' function. * * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source * PEB was put meanwhile, or there is I/O on the source PEB * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source * PEB * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target * PEB * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target * PEB * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the * target PEB * MOVE_RETRY: retry scrubbing the PEB */ enum { MOVE_CANCEL_RACE = 1, MOVE_SOURCE_RD_ERR, MOVE_TARGET_RD_ERR, MOVE_TARGET_WR_ERR, MOVE_TARGET_BITFLIPS, MOVE_RETRY, }; /* * Return codes of the fastmap sub-system * * UBI_NO_FASTMAP: No fastmap super block was found * UBI_BAD_FASTMAP: A fastmap was found but it's unusable */ enum { UBI_NO_FASTMAP = 1, UBI_BAD_FASTMAP, }; /** * struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the * flash. * @hdr: a pointer to the VID header stored in buffer * @buffer: underlying buffer */ struct ubi_vid_io_buf { struct ubi_vid_hdr *hdr; void *buffer; }; /** * struct ubi_wl_entry - wear-leveling entry. * @u.rb: link in the corresponding (free/used) RB-tree * @u.list: link in the protection queue * @ec: erase counter * @pnum: physical eraseblock number * * This data structure is used in the WL sub-system. Each physical eraseblock * has a corresponding &struct ubi_wl_entry object which may be kept in different * RB-trees. See WL sub-system for details. */ struct ubi_wl_entry { union { struct rb_node rb; struct list_head list; } u; int ec; int pnum; }; /** * struct ubi_ltree_entry - an entry in the lock tree. * @rb: links RB-tree nodes * @vol_id: volume ID of the locked logical eraseblock * @lnum: locked logical eraseblock number * @users: how many tasks are using this logical eraseblock or are waiting for it * @mutex: read/write mutex to implement read/write access serialization to * the (@vol_id, @lnum) logical eraseblock * * This data structure is used in the EBA sub-system to implement per-LEB * locking.
When a logical eraseblock is being locked, the corresponding * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree). * See EBA sub-system for details. */ struct ubi_ltree_entry { struct rb_node rb; int vol_id; int lnum; int users; struct rw_semaphore mutex; }; /** * struct ubi_rename_entry - volume re-name description data structure. * @new_name_len: new volume name length * @new_name: new volume name * @remove: if not zero, this volume should be removed, not re-named * @desc: descriptor of the volume * @list: links re-name entries into a list * * This data structure is utilized in the multiple volume re-name code. Namely, * UBI first creates a list of &struct ubi_rename_entry objects from the * &struct ubi_rnvol_req request object, and then utilizes this list to do all * the work. */ struct ubi_rename_entry { int new_name_len; char new_name[UBI_VOL_NAME_MAX + 1]; int remove; struct ubi_volume_desc *desc; struct list_head list; }; struct ubi_volume_desc; /** * struct ubi_fastmap_layout - in-memory fastmap data structure. * @e: PEBs used by the current fastmap * @to_be_tortured: if non-zero, this PEB has to be tortured * @used_blocks: number of used PEBs * @max_pool_size: maximal size of the user pool * @max_wl_pool_size: maximal size of the pool used by the WL sub-system */ struct ubi_fastmap_layout { struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS]; int to_be_tortured[UBI_FM_MAX_BLOCKS]; int used_blocks; int max_pool_size; int max_wl_pool_size; }; /** * struct ubi_fm_pool - in-memory fastmap pool * @pebs: PEBs in this pool * @used: number of used PEBs * @size: total number of PEBs in this pool * @max_size: maximal size of the pool * * A pool gets filled with up to @max_size PEBs. * If all PEBs within the pool are used, a new fastmap will be written * to the flash and the pool gets refilled with empty PEBs. */ struct ubi_fm_pool { int pebs[UBI_FM_MAX_POOL_SIZE]; int used; int size; int max_size; }; /** * struct ubi_eba_leb_desc - EBA logical eraseblock descriptor * @lnum: the logical eraseblock number * @pnum: the physical eraseblock where the LEB can be found * * This structure is here to hide EBA's internals from other parts of the * UBI implementation. * * One can query the position of a LEB by calling ubi_eba_get_ldesc(). */ struct ubi_eba_leb_desc { int lnum; int pnum; }; /** * struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the Linux device model * @cdev: character device object to create character device * @ubi: reference to the UBI device description object * @vol_id: volume ID * @ref_count: volume reference count * @readers: number of users holding this volume in read-only mode * @writers: number of users holding this volume in read-write mode * @exclusive: whether somebody holds this volume in exclusive mode * @metaonly: whether somebody is altering only meta data of this volume * * @reserved_pebs: how many physical eraseblocks are reserved for this volume * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) * @usable_leb_size: logical eraseblock size without padding * @used_ebs: how many logical eraseblocks in this volume contain data * @last_eb_bytes: how many bytes are stored in the last logical eraseblock * @used_bytes: how many bytes of data this volume contains * @alignment: volume alignment * @data_pad: how many bytes are not used at the end of physical eraseblocks to * satisfy the requested alignment * @name_len: volume name length * @name: volume name * * @upd_ebs: how many eraseblocks are expected to be updated * @ch_lnum: LEB number which is being changing by the atomic LEB change * operation * @upd_bytes: how many bytes are expected to be received for volume update or * atomic LEB change * @upd_received: how many bytes were already received for volume update or * atomic LEB change * @upd_buf: update buffer which is used to collect update data or data for * atomic LEB change * * @eba_tbl: EBA table of this volume (LEB->PEB mapping) * @skip_check: %1 if CRC check of this static volume should be skipped. * Directly reflects the presence of the * %UBI_VTBL_SKIP_CRC_CHECK_FLG flag in the vtbl entry * @checked: %1 if this static volume was checked * @corrupted: %1 if the volume is corrupted (static volumes only) * @upd_marker: %1 if the update marker is set for this volume * @updating: %1 if the volume is being updated * @changing_leb: %1 if the atomic LEB change ioctl command is in progress * @direct_writes: %1 if direct writes are enabled for this volume * * @checkmap: bitmap to remember which PEB->LEB mappings got checked, * protected by UBI LEB lock tree. * * The @corrupted field indicates that the volume's contents is corrupted. * Since UBI protects only static volumes, this field is not relevant to * dynamic volumes - it is user's responsibility to assure their data * integrity. * * The @upd_marker flag indicates that this volume is either being updated at * the moment or is damaged because of an unclean reboot. */ struct ubi_volume { struct device dev; struct cdev cdev; struct ubi_device *ubi; int vol_id; int ref_count; int readers; int writers; int exclusive; int metaonly; bool is_dead; int reserved_pebs; int vol_type; int usable_leb_size; int used_ebs; int last_eb_bytes; long long used_bytes; int alignment; int data_pad; int name_len; char name[UBI_VOL_NAME_MAX + 1]; int upd_ebs; int ch_lnum; long long upd_bytes; long long upd_received; void *upd_buf; struct ubi_eba_table *eba_tbl; unsigned int skip_check:1; unsigned int checked:1; unsigned int corrupted:1; unsigned int upd_marker:1; unsigned int updating:1; unsigned int changing_leb:1; unsigned int direct_writes:1; #ifdef CONFIG_MTD_UBI_FASTMAP unsigned long *checkmap; #endif }; /** * struct ubi_volume_desc - UBI volume descriptor returned when it is opened. 
* @vol: reference to the corresponding volume description object * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE * or %UBI_METAONLY) */ struct ubi_volume_desc { struct ubi_volume *vol; int mode; }; /** * struct ubi_debug_info - debugging information for an UBI device. * * @chk_gen: if UBI general extra checks are enabled * @chk_io: if UBI I/O extra checks are enabled * @chk_fastmap: if UBI fastmap extra checks are enabled * @disable_bgt: disable the background task for testing purposes * @emulate_bitflips: emulate bit-flips for testing purposes * @emulate_io_failures: emulate write/erase failures for testing purposes * @emulate_power_cut: emulate power cut for testing purposes * @power_cut_counter: count down for writes left until emulated power cut * @power_cut_min: minimum number of writes before emulating a power cut * @power_cut_max: maximum number of writes until emulating a power cut * @emulate_failures: emulate failures for testing purposes * @dfs_dir_name: name of debugfs directory containing files of this UBI device * @dfs_dir: direntry object of the UBI device debugfs directory * @dfs_chk_gen: debugfs knob to enable UBI general extra checks * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks * @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks * @dfs_disable_bgt: debugfs knob to disable the background task * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures * @dfs_emulate_power_cut: debugfs knob to emulate power cuts * @dfs_power_cut_min: debugfs knob for minimum writes before power cut * @dfs_power_cut_max: debugfs knob for maximum writes until power cut * @dfs_emulate_failures: debugfs entry to control the fault injection type */ struct ubi_debug_info { unsigned int chk_gen:1; unsigned int chk_io:1; unsigned int chk_fastmap:1; unsigned int disable_bgt:1; unsigned int emulate_bitflips:1; unsigned int emulate_io_failures:1; unsigned int emulate_power_cut:2; unsigned int power_cut_counter; unsigned int power_cut_min; unsigned int power_cut_max; unsigned int emulate_failures; char dfs_dir_name[UBI_DFS_DIR_LEN]; struct dentry *dfs_dir; struct dentry *dfs_chk_gen; struct dentry *dfs_chk_io; struct dentry *dfs_chk_fastmap; struct dentry *dfs_disable_bgt; struct dentry *dfs_emulate_bitflips; struct dentry *dfs_emulate_io_failures; struct dentry *dfs_emulate_power_cut; struct dentry *dfs_power_cut_min; struct dentry *dfs_power_cut_max; struct dentry *dfs_emulate_failures; }; /** * struct ubi_device - UBI device description structure * @dev: UBI device object to use the Linux device model * @cdev: character device object to create character device * @ubi_num: UBI device number * @ubi_name: UBI device name * @vol_count: number of volumes in this UBI device * @volumes: volumes of this UBI device * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, * @vol->readers, @vol->writers, @vol->exclusive, * @vol->metaonly, @vol->ref_count, @vol->mapping and * @vol->eba_tbl. 
* @ref_count: count of references on the UBI device * @image_seq: image sequence number recorded on EC headers * * @rsvd_pebs: count of reserved physical eraseblocks * @avail_pebs: count of available physical eraseblocks * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB * handling * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling * * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end * of UBI initialization * @vtbl_slots: how many slots are available in the volume table * @vtbl_size: size of the volume table in bytes * @vtbl: in-RAM volume table copy * @device_mutex: protects on-flash volume table and serializes volume * creation, deletion, update, re-size, re-name and set * property * * @max_ec: current highest erase counter value * @mean_ec: current mean erase counter value * * @global_sqnum: global sequence number * @ltree_lock: protects the lock tree and @global_sqnum * @ltree: the lock tree * @alc_mutex: serializes "atomic LEB change" operations * * @fm_disabled: non-zero if fastmap is disabled (default) * @fm: in-memory data structure of the currently used fastmap * @fm_pool: in-memory data structure of the fastmap pool * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL * sub-system * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure * that critical sections cannot be interrupted by ubi_update_fastmap() * @fm_buf: vmalloc()'d buffer which holds the raw fastmap * @fm_size: fastmap size in bytes * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled * @fast_attach: non-zero if UBI was attached by fastmap * @fm_anchor: The next anchor PEB to use for fastmap * @fm_do_produce_anchor: If true produce an anchor PEB in wl * @fm_pool_rsv_cnt: Number of reserved PEBs for filling pool/wl_pool * * @used: RB-tree of used physical eraseblocks * @erroneous: RB-tree of erroneous used physical eraseblocks * @free: RB-tree of free physical eraseblocks * @free_count: Contains the number of elements in @free * @scrub: RB-tree of physical eraseblocks which need scrubbing * @pq: protection queue (contain physical eraseblocks which are temporarily * protected from the wear-leveling worker) * @pq_head: protection queue head * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works, * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool, * and @fm_wl_pool fields * @move_mutex: serializes eraseblock moves * @work_sem: used to wait for all the scheduled works to finish and prevent * new works from being submitted * @wl_scheduled: non-zero if the wear-leveling was scheduled * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any * physical eraseblock * @move_from: physical eraseblock from where the data is being moved * @move_to: physical eraseblock where the data is being moved to * @move_to_put: if the "to" PEB was put * @works: list of pending works * @works_count: count of pending works * @bgt_thread: background thread description object * @thread_enabled: if the background thread is enabled * @bgt_name: background thread name * * @flash_size: underlying MTD device size (in bytes) * @peb_count: count of physical eraseblocks on the MTD device * @peb_size: physical eraseblock size * @bad_peb_limit: top limit of expected bad physical eraseblocks * @bad_peb_count: count of bad 
physical eraseblocks * @good_peb_count: count of good physical eraseblocks * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not * used by UBI) * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks * @min_io_size: minimal input/output unit size of the underlying MTD device * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers * @ro_mode: if the UBI device is in read-only mode * @leb_size: logical eraseblock size * @leb_start: starting offset of logical eraseblocks within physical * eraseblocks * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size * @vid_hdr_offset: starting offset of the volume identifier header (might be * unaligned) * @vid_hdr_aloffset: starting offset of the VID header aligned to * @hdrs_min_io_size * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset * @bad_allowed: whether the MTD device admits bad physical eraseblocks or not * @nor_flash: non-zero if working on top of NOR flash * @max_write_size: maximum amount of bytes the underlying flash can write at a * time (MTD write buffer size) * @mtd: MTD device descriptor * * @peb_buf: a buffer of PEB size used for different purposes * @buf_mutex: protects @peb_buf * @ckvol_mutex: serializes static volume checking when opening * * @dbg: debugging information for this UBI device */ struct ubi_device { struct cdev cdev; struct device dev; int ubi_num; char ubi_name[sizeof(UBI_NAME_STR)+5]; int vol_count; struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; spinlock_t volumes_lock; int ref_count; int image_seq; bool is_dead; int rsvd_pebs; int avail_pebs; int beb_rsvd_pebs; int beb_rsvd_level; int bad_peb_limit; int autoresize_vol_id; int vtbl_slots; int vtbl_size; struct ubi_vtbl_record *vtbl; struct mutex device_mutex; int max_ec; /* Note, mean_ec is not updated run-time - should be fixed */ int mean_ec; /* EBA sub-system's stuff */ unsigned long long global_sqnum; spinlock_t ltree_lock; struct rb_root ltree; struct mutex alc_mutex; /* Fastmap stuff */ int fm_disabled; struct ubi_fastmap_layout *fm; struct ubi_fm_pool fm_pool; struct ubi_fm_pool fm_wl_pool; struct rw_semaphore fm_eba_sem; struct rw_semaphore fm_protect; void *fm_buf; size_t fm_size; struct work_struct fm_work; int fm_work_scheduled; int fast_attach; struct ubi_wl_entry *fm_anchor; int fm_do_produce_anchor; int fm_pool_rsv_cnt; /* Wear-leveling sub-system's stuff */ struct rb_root used; struct rb_root erroneous; struct rb_root free; int free_count; struct rb_root scrub; struct list_head pq[UBI_PROT_QUEUE_LEN]; int pq_head; spinlock_t wl_lock; struct mutex move_mutex; struct rw_semaphore work_sem; int wl_scheduled; struct ubi_wl_entry **lookuptbl; struct ubi_wl_entry *move_from; struct ubi_wl_entry *move_to; int move_to_put; struct list_head works; int works_count; struct task_struct *bgt_thread; int thread_enabled; char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; /* I/O sub-system's stuff */ long long flash_size; int peb_count; int peb_size; int bad_peb_count; int good_peb_count; int corr_peb_count; int erroneous_peb_count; int max_erroneous; int min_io_size; int hdrs_min_io_size; int ro_mode; int leb_size; int leb_start; int ec_hdr_alsize; int vid_hdr_alsize; int vid_hdr_offset; int vid_hdr_aloffset; int vid_hdr_shift; unsigned int bad_allowed:1; unsigned int nor_flash:1; int max_write_size; struct mtd_info *mtd; void 
*peb_buf; struct mutex buf_mutex; struct mutex ckvol_mutex; struct ubi_debug_info dbg; }; /** * struct ubi_ainf_peb - attach information about a physical eraseblock. * @ec: erase counter (%UBI_UNKNOWN if it is unknown) * @pnum: physical eraseblock number * @vol_id: ID of the volume this LEB belongs to * @lnum: logical eraseblock number * @scrub: if this physical eraseblock needs scrubbing * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB) * @sqnum: sequence number * @u: unions RB-tree or @list links * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects * @u.list: link in one of the eraseblock lists * * One object of this type is allocated for each physical eraseblock when * attaching an MTD device. Note, if this PEB does not belong to any LEB / * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN. */ struct ubi_ainf_peb { int ec; int pnum; int vol_id; int lnum; unsigned int scrub:1; unsigned int copy_flag:1; unsigned long long sqnum; union { struct rb_node rb; struct list_head list; } u; }; /** * struct ubi_ainf_volume - attaching information about a volume. * @vol_id: volume ID * @highest_lnum: highest logical eraseblock number in this volume * @leb_count: number of logical eraseblocks in this volume * @vol_type: volume type * @used_ebs: number of used logical eraseblocks in this volume (only for * static volumes) * @last_data_size: amount of data in the last logical eraseblock of this * volume (always equivalent to the usable logical eraseblock * size in case of dynamic volumes) * @data_pad: how many bytes at the end of logical eraseblocks of this volume * are not used (due to volume alignment) * @compat: compatibility flags of this volume * @rb: link in the volume RB-tree * @root: root of the RB-tree containing all the eraseblock belonging to this * volume (&struct ubi_ainf_peb objects) * * One object of this type is allocated for each volume when attaching an MTD * device. */ struct ubi_ainf_volume { int vol_id; int highest_lnum; int leb_count; int vol_type; int used_ebs; int last_data_size; int data_pad; int compat; struct rb_node rb; struct rb_root root; }; /** * struct ubi_attach_info - MTD device attaching information. 
* @volumes: root of the volume RB-tree * @corr: list of corrupted physical eraseblocks * @free: list of free physical eraseblocks * @erase: list of physical eraseblocks which have to be erased * @alien: list of physical eraseblocks which should not be used by UBI (e.g., * those belonging to "preserve"-compatible internal volumes) * @fastmap: list of physical eraseblocks which relate to fastmap (e.g., * eraseblocks of the current and not yet erased old fastmap blocks) * @corr_peb_count: count of PEBs in the @corr list * @empty_peb_count: count of PEBs which are presumably empty (contain only * 0xFF bytes) * @alien_peb_count: count of PEBs in the @alien list * @bad_peb_count: count of bad physical eraseblocks * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked * as bad yet, but which look like bad * @vols_found: number of volumes found * @highest_vol_id: highest volume ID * @is_empty: flag indicating whether the MTD device is empty or not * @force_full_scan: flag indicating whether we need to do a full scan and drop all existing Fastmap data structures * @min_ec: lowest erase counter value * @max_ec: highest erase counter value * @max_sqnum: highest sequence number value * @mean_ec: mean erase counter value * @ec_sum: a temporary variable used when calculating @mean_ec * @ec_count: a temporary variable used when calculating @mean_ec * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects * @ech: temporary EC header. Only available during scan * @vidh: temporary VID buffer. Only available during scan * * This data structure contains the result of attaching an MTD device and may * be used by other UBI sub-systems to build final UBI data structures, further * error-recovery and so on. */ struct ubi_attach_info { struct rb_root volumes; struct list_head corr; struct list_head free; struct list_head erase; struct list_head alien; struct list_head fastmap; int corr_peb_count; int empty_peb_count; int alien_peb_count; int bad_peb_count; int maybe_bad_peb_count; int vols_found; int highest_vol_id; int is_empty; int force_full_scan; int min_ec; int max_ec; unsigned long long max_sqnum; int mean_ec; uint64_t ec_sum; int ec_count; struct kmem_cache *aeb_slab_cache; struct ubi_ec_hdr *ech; struct ubi_vid_io_buf *vidb; }; /** * struct ubi_work - UBI work description data structure. * @list: a link in the list of pending works * @func: worker function * @e: physical eraseblock to erase * @vol_id: the volume ID on which this erasure is being performed * @lnum: the logical eraseblock number * @torture: if the physical eraseblock has to be tortured * * The @func pointer points to the worker function. If the @shutdown argument is * not zero, the worker has to free the resources and exit immediately as the * WL sub-system is shutting down. * The worker has to return zero in case of success and a negative error code in * case of failure. 
*/ struct ubi_work { struct list_head list; int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown); /* The below fields are only relevant to erasure works */ struct ubi_wl_entry *e; int vol_id; int lnum; int torture; }; #include "debug.h" extern struct kmem_cache *ubi_wl_entry_slab; extern const struct file_operations ubi_ctrl_cdev_operations; extern const struct file_operations ubi_cdev_operations; extern const struct file_operations ubi_vol_cdev_operations; extern const struct class ubi_class; extern struct mutex ubi_devices_mutex; extern struct blocking_notifier_head ubi_notifiers; /* attach.c */ struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum, int ec); void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb); int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips); struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id); struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, int vol_id); void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av); struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, struct ubi_attach_info *ai); int ubi_attach(struct ubi_device *ubi, int force_scan); /* vtbl.c */ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, struct ubi_vtbl_record *vtbl_rec); int ubi_vtbl_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list); int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai); /* vmt.c */ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl); int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list); int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); /* upd.c */ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, long long bytes); int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, const void __user *buf, int count); int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, const struct ubi_leb_change_req *req); int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, const void __user *buf, int count); /* misc.c */ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); int ubi_check_volume(struct ubi_device *ubi, int vol_id); void ubi_update_reserved(struct ubi_device *ubi); void ubi_calculate_reserved(struct ubi_device *ubi); int ubi_check_pattern(const void *buf, uint8_t patt, int size); static inline bool ubi_leb_valid(struct ubi_volume *vol, int lnum) { return lnum >= 0 && lnum < vol->reserved_pebs; } /* eba.c */ struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol, int nentries); void ubi_eba_destroy_table(struct ubi_eba_table *tbl); void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst, int nentries); void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl); void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum, struct ubi_eba_leb_desc *ldesc); bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum); int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum); int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, void *buf, int offset, int len, int check); 
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol, struct ubi_sgl *sgl, int lnum, int offset, int len, int check); int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, const void *buf, int offset, int len); int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, const void *buf, int len, int used_ebs); int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, const void *buf, int len); int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, struct ubi_vid_io_buf *vidb); int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); unsigned long long ubi_next_sqnum(struct ubi_device *ubi); int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, struct ubi_attach_info *ai_scan); /* wl.c */ int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture); int ubi_wl_get_peb(struct ubi_device *ubi); int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, int pnum, int torture); int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum); int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai); void ubi_wl_close(struct ubi_device *ubi); int ubi_thread(void *u); struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor); int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e, int lnum, int torture); int ubi_is_erase_work(struct ubi_work *wrk); void ubi_refill_pools_and_lock(struct ubi_device *ubi); int ubi_ensure_anchor_pebs(struct ubi_device *ubi); int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub); /* io.c */ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, int len); int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, int len); int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture); int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, struct ubi_ec_hdr *ec_hdr, int verbose); int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, struct ubi_ec_hdr *ec_hdr); int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, struct ubi_vid_io_buf *vidb, int verbose); int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, struct ubi_vid_io_buf *vidb); /* build.c */ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset, int max_beb_per1024, bool disable_fm, bool need_resv_pool); int ubi_detach_mtd_dev(int ubi_num, int anyway); struct ubi_device *ubi_get_device(int ubi_num); void ubi_put_device(struct ubi_device *ubi); struct ubi_device *ubi_get_by_major(int major); int ubi_major2num(int major); int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype); int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb); int ubi_enumerate_volumes(struct notifier_block *nb); void ubi_free_all_volumes(struct ubi_device *ubi); void ubi_free_internal_volumes(struct ubi_device *ubi); /* kapi.c */ void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, struct ubi_volume_info *vi); int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id); /* scan.c */ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, int pnum, const struct 
ubi_vid_hdr *vid_hdr); /* fastmap.c */ #ifdef CONFIG_MTD_UBI_FASTMAP size_t ubi_calc_fm_size(struct ubi_device *ubi); int ubi_update_fastmap(struct ubi_device *ubi); int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_attach_info *scan_ai); int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count); void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol); #else static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; } static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; } static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {} #endif /* block.c */ #ifdef CONFIG_MTD_UBI_BLOCK int ubiblock_init(void); void ubiblock_exit(void); int ubiblock_create(struct ubi_volume_info *vi); int ubiblock_remove(struct ubi_volume_info *vi); #else static inline int ubiblock_init(void) { return 0; } static inline void ubiblock_exit(void) {} static inline int ubiblock_create(struct ubi_volume_info *vi) { return -ENOSYS; } static inline int ubiblock_remove(struct ubi_volume_info *vi) { return -ENOSYS; } #endif /* * ubi_for_each_free_peb - walk the UBI free RB tree. * @ubi: UBI device description object * @e: a pointer to a ubi_wl_entry to use as cursor * @tmp_rb: a pointer to a 'struct rb_node' to use as a loop counter */ #define ubi_for_each_free_peb(ubi, e, tmp_rb) \ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb) /* * ubi_for_each_used_peb - walk the UBI used RB tree. * @ubi: UBI device description object * @e: a pointer to a ubi_wl_entry to use as cursor * @tmp_rb: a pointer to a 'struct rb_node' to use as a loop counter */ #define ubi_for_each_used_peb(ubi, e, tmp_rb) \ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb) /* * ubi_for_each_scrub_peb - walk the UBI scrub RB tree. * @ubi: UBI device description object * @e: a pointer to a ubi_wl_entry to use as cursor * @tmp_rb: a pointer to a 'struct rb_node' to use as a loop counter */ #define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb) /* * ubi_for_each_protected_peb - walk the UBI protection queue. * @ubi: UBI device description object * @i: an integer used as a counter * @e: a pointer to a ubi_wl_entry to use as cursor */ #define ubi_for_each_protected_peb(ubi, i, e) \ for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \ list_for_each_entry((e), &(ubi->pq[(i)]), u.list) /* * ubi_rb_for_each_entry - walk an RB-tree. * @rb: a pointer to type 'struct rb_node' to use as a loop counter * @pos: a pointer to RB-tree entry type to use as a loop counter * @root: RB-tree's root * @member: the name of the 'struct rb_node' within the RB-tree entry */ #define ubi_rb_for_each_entry(rb, pos, root, member) \ for (rb = rb_first(root), \ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \ rb; \ rb = rb_next(rb), \ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL)) /* * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
* * @av: volume attaching information * @aeb: attaching eraseblock information * @list: the list to move to */ static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av, struct ubi_ainf_peb *aeb, struct list_head *list) { rb_erase(&aeb->u.rb, &av->root); list_add_tail(&aeb->u.list, list); } /** * ubi_init_vid_buf - Initialize a VID buffer * @ubi: the UBI device * @vidb: the VID buffer to initialize * @buf: the underlying buffer */ static inline void ubi_init_vid_buf(const struct ubi_device *ubi, struct ubi_vid_io_buf *vidb, void *buf) { if (buf) memset(buf, 0, ubi->vid_hdr_alsize); vidb->buffer = buf; vidb->hdr = buf + ubi->vid_hdr_shift; } /** * ubi_alloc_vid_buf - Allocate a VID buffer * @ubi: the UBI device * @gfp_flags: GFP flags to use for the allocation */ static inline struct ubi_vid_io_buf * ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags) { struct ubi_vid_io_buf *vidb; void *buf; vidb = kzalloc(sizeof(*vidb), gfp_flags); if (!vidb) return NULL; buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags); if (!buf) { kfree(vidb); return NULL; } ubi_init_vid_buf(ubi, vidb, buf); return vidb; } /** * ubi_free_vid_buf - Free a VID buffer * @vidb: the VID buffer to free */ static inline void ubi_free_vid_buf(struct ubi_vid_io_buf *vidb) { if (!vidb) return; kfree(vidb->buffer); kfree(vidb); } /** * ubi_get_vid_hdr - Get the VID header attached to a VID buffer * @vidb: VID buffer */ static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb) { return vidb->hdr; } /** * ubi_ro_mode - switch to read-only mode. * @ubi: UBI device description object */ static inline void ubi_ro_mode(struct ubi_device *ubi) { if (!ubi->ro_mode) { ubi->ro_mode = 1; ubi_warn(ubi, "switch to read-only mode"); dump_stack(); } } /* * This function is equivalent to 'ubi_io_read()', but @offset is relative to * the beginning of the logical eraseblock, not to the beginning of the * physical eraseblock. */ static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf, int pnum, int offset, int len) { ubi_assert(offset >= 0); return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len); } /* * This function is equivalent to 'ubi_io_write()', but @offset is relative to * the beginning of the logical eraseblock, not to the beginning of the * physical eraseblock. */ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf, int pnum, int offset, int len) { ubi_assert(offset >= 0); if (ubi_dbg_power_cut(ubi, MASK_POWER_CUT_DATA)) { ubi_warn(ubi, "XXXXX emulating a power cut when writing data XXXXX"); ubi_ro_mode(ubi); return -EROFS; } return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len); } /** * vol_id2idx - get table index by volume ID. * @ubi: UBI device description object * @vol_id: volume ID */ static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id) { if (vol_id >= UBI_INTERNAL_VOL_START) return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots; else return vol_id; } /** * idx2vol_id - get volume ID by table index. * @ubi: UBI device description object * @idx: table index */ static inline int idx2vol_id(const struct ubi_device *ubi, int idx) { if (idx >= ubi->vtbl_slots) return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START; else return idx; } /** * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume.
* @vol_id: volume ID */ static inline bool ubi_is_fm_vol(int vol_id) { switch (vol_id) { case UBI_FM_SB_VOLUME_ID: case UBI_FM_DATA_VOLUME_ID: return true; } return false; } /** * ubi_find_fm_block - check whether a PEB is part of the current Fastmap. * @ubi: UBI device description object * @pnum: physical eraseblock to look for * * This function returns a wear leveling object if @pnum relates to the current * fastmap, %NULL otherwise. */ static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi, int pnum) { int i; if (ubi->fm) { for (i = 0; i < ubi->fm->used_blocks; i++) { if (ubi->fm->e[i]->pnum == pnum) return ubi->fm->e[i]; } } return NULL; } #endif /* !__UBI_UBI_H__ */
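/*
 * Editor's note: a minimal usage sketch (not part of UBI) showing how the
 * iteration macro ubi_for_each_free_peb() and the VID-buffer helpers declared
 * above fit together. It assumes a valid struct ubi_device and that the caller
 * already serializes access to the free RB-tree the way the WL sub-system does
 * (see @wl_lock); the function name is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_count_worn_free_pebs(struct ubi_device *ubi, int ec_limit)
{
	struct ubi_wl_entry *e;
	struct rb_node *tmp_rb;
	struct ubi_vid_io_buf *vidb;
	int count = 0;

	/* Walk the free RB-tree; @e visits each wear-leveling entry. */
	ubi_for_each_free_peb(ubi, e, tmp_rb)
		if (e->ec > ec_limit)
			count++;

	/* Allocate a VID buffer sized for this device, then release it. */
	vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vidb)
		return -ENOMEM;
	ubi_free_vid_buf(vidb);

	return count;
}
#endif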
// SPDX-License-Identifier: GPL-2.0-only /* DVB USB framework compliant Linux driver for the HanfTek UMT-010 USB2.0 * DVB-T receiver. * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "dibusb.h" #include "mt352.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int umt_mt352_demod_init(struct dvb_frontend *fe) { static u8 mt352_clock_config[] = { 0x89, 0xb8, 0x2d }; static u8 mt352_reset[] = { 0x50, 0x80 }; static u8 mt352_mclk_ratio[] = { 0x8b, 0x00 }; static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 }; static u8 mt352_agc_cfg[] = { 0x67, 0x10, 0xa0 }; static u8 mt352_sec_agc_cfg1[] = { 0x6a, 0xff }; static u8 mt352_sec_agc_cfg2[] = { 0x6d, 0xff }; static u8 mt352_sec_agc_cfg3[] = { 0x70, 0x40 }; static u8 mt352_sec_agc_cfg4[] = { 0x7b, 0x03 }; static u8 mt352_sec_agc_cfg5[] = { 0x7d, 0x0f }; static u8 mt352_acq_ctl[] = { 0x53, 0x50 }; static u8 mt352_input_freq_1[] = { 0x56, 0x31, 0x06 }; mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config)); udelay(2000); mt352_write(fe, mt352_reset, sizeof(mt352_reset)); mt352_write(fe, mt352_mclk_ratio, sizeof(mt352_mclk_ratio)); mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg)); mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg)); mt352_write(fe, mt352_sec_agc_cfg1, sizeof(mt352_sec_agc_cfg1)); mt352_write(fe, mt352_sec_agc_cfg2, sizeof(mt352_sec_agc_cfg2)); mt352_write(fe, mt352_sec_agc_cfg3, sizeof(mt352_sec_agc_cfg3)); mt352_write(fe, mt352_sec_agc_cfg4, sizeof(mt352_sec_agc_cfg4)); mt352_write(fe, mt352_sec_agc_cfg5, sizeof(mt352_sec_agc_cfg5)); mt352_write(fe, mt352_acq_ctl, sizeof(mt352_acq_ctl)); mt352_write(fe, mt352_input_freq_1, sizeof(mt352_input_freq_1)); return 0; } static int umt_mt352_frontend_attach(struct dvb_usb_adapter *adap) { struct mt352_config umt_config; memset(&umt_config, 0, sizeof(umt_config)); umt_config.demod_init = umt_mt352_demod_init; umt_config.demod_address = 0xf; adap->fe_adap[0].fe = dvb_attach(mt352_attach, &umt_config, &adap->dev->i2c_adap); return 0; } static int umt_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL, DVB_PLL_TUA6034); return 0; } /* USB Driver stuff */ static struct dvb_usb_device_properties umt_properties; static int umt_probe(struct usb_interface *intf, const struct usb_device_id *id) { if (0 == dvb_usb_device_init(intf, &umt_properties, THIS_MODULE, NULL, adapter_nr)) return 0; return -EINVAL; } /* do not change the order of the ID table */ enum { HANFTEK_UMT_010_COLD, HANFTEK_UMT_010_WARM, }; static const struct usb_device_id umt_table[] = { DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_COLD), DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_WARM), { } }; MODULE_DEVICE_TABLE(usb, umt_table); static struct dvb_usb_device_properties umt_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-umt-010-02.fw", .num_adapters = 1, .adapter = { { .num_frontends = 1,
.fe = {{ .streaming_ctrl = dibusb2_0_streaming_ctrl, .frontend_attach = umt_mt352_frontend_attach, .tuner_attach = umt_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = MAX_NO_URBS_FOR_DATA_STREAM, .endpoint = 0x06, .u = { .bulk = { .buffersize = 512, } } }, }}, .size_of_priv = sizeof(struct dibusb_state), } }, .power_ctrl = dibusb_power_ctrl, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "Hanftek UMT-010 DVB-T USB2.0", { &umt_table[HANFTEK_UMT_010_COLD], NULL }, { &umt_table[HANFTEK_UMT_010_WARM], NULL }, }, } }; static struct usb_driver umt_driver = { .name = "dvb_usb_umt_010", .probe = umt_probe, .disconnect = dvb_usb_device_exit, .id_table = umt_table, }; module_usb_driver(umt_driver); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
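/*
 * Editor's note: each static u8 array in umt_mt352_demod_init() above is an
 * MT352 register address followed by one or more values, pushed to the
 * demodulator with mt352_write(fe, buf, len). The hypothetical helper below
 * merely restates that pattern with the acquisition-control setting already
 * used above (register 0x53, value 0x50); it is illustration only, not part
 * of the driver.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_mt352_write_acq_ctl(struct dvb_frontend *fe)
{
	static u8 acq_ctl[] = { 0x53, 0x50 };	/* { register, value } */

	return mt352_write(fe, acq_ctl, sizeof(acq_ctl));
}
#endif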
// SPDX-License-Identifier: GPL-2.0-only /* * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which * are not related to any other subsystem * * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org> */ #include <asm/byteorder.h> #include <linux/kobject.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/export.h> #include <linux/init.h> #include <linux/kexec.h> #include <linux/profile.h> #include <linux/stat.h> #include <linux/sched.h> #include <linux/capability.h> #include <linux/compiler.h> #include <linux/rcupdate.h> /* rcu_expedited and rcu_normal */ #if defined(__LITTLE_ENDIAN) #define CPU_BYTEORDER_STRING "little" #elif defined(__BIG_ENDIAN) #define CPU_BYTEORDER_STRING "big" #else #error Unknown byteorder #endif #define KERNEL_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define KERNEL_ATTR_RW(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RW(_name) /* current uevent sequence number */ static ssize_t uevent_seqnum_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%llu\n", (u64)atomic64_read(&uevent_seqnum)); } KERNEL_ATTR_RO(uevent_seqnum); /* cpu byteorder */ static ssize_t cpu_byteorder_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING); } KERNEL_ATTR_RO(cpu_byteorder); /* address bits */ static ssize_t address_bits_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */); } KERNEL_ATTR_RO(address_bits); #ifdef CONFIG_UEVENT_HELPER /* uevent helper program, used during early boot */ static ssize_t uevent_helper_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", uevent_helper); } static ssize_t uevent_helper_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { if (count+1 > UEVENT_HELPER_PATH_LEN) return -ENOENT; memcpy(uevent_helper, buf, count); uevent_helper[count] = '\0'; if (count && uevent_helper[count-1] == '\n') uevent_helper[count-1] = '\0'; return count; } KERNEL_ATTR_RW(uevent_helper); #endif #ifdef CONFIG_PROFILING static ssize_t profiling_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n",
prof_on); } static ssize_t profiling_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret; static DEFINE_MUTEX(lock); /* * We need serialization, for profile_setup() initializes prof_on * value and profile_init() must not reallocate prof_buffer after * once allocated. */ guard(mutex)(&lock); if (prof_on) return -EEXIST; /* * This eventually calls into get_option() which * has a ton of callers and is not const. It is * easiest to cast it away here. */ profile_setup((char *)buf); ret = profile_init(); if (ret) return ret; ret = create_proc_profile(); if (ret) return ret; return count; } KERNEL_ATTR_RW(profiling); #endif #ifdef CONFIG_KEXEC_CORE static ssize_t kexec_loaded_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", !!kexec_image); } KERNEL_ATTR_RO(kexec_loaded); #ifdef CONFIG_CRASH_DUMP static ssize_t kexec_crash_loaded_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", kexec_crash_loaded()); } KERNEL_ATTR_RO(kexec_crash_loaded); static ssize_t kexec_crash_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t size = crash_get_memory_size(); if (size < 0) return size; return sysfs_emit(buf, "%zd\n", size); } static ssize_t kexec_crash_size_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned long cnt; int ret; if (kstrtoul(buf, 0, &cnt)) return -EINVAL; ret = crash_shrink_memory(cnt); return ret < 0 ? ret : count; } KERNEL_ATTR_RW(kexec_crash_size); #endif /* CONFIG_CRASH_DUMP*/ #endif /* CONFIG_KEXEC_CORE */ #ifdef CONFIG_VMCORE_INFO static ssize_t vmcoreinfo_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { phys_addr_t vmcore_base = paddr_vmcoreinfo_note(); return sysfs_emit(buf, "%pa %x\n", &vmcore_base, (unsigned int)VMCOREINFO_NOTE_SIZE); } KERNEL_ATTR_RO(vmcoreinfo); #ifdef CONFIG_CRASH_HOTPLUG static ssize_t crash_elfcorehdr_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { unsigned int sz = crash_get_elfcorehdr_size(); return sysfs_emit(buf, "%u\n", sz); } KERNEL_ATTR_RO(crash_elfcorehdr_size); #endif #endif /* CONFIG_VMCORE_INFO */ /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", file_caps_enabled); } KERNEL_ATTR_RO(fscaps); #ifndef CONFIG_TINY_RCU int rcu_expedited; static ssize_t rcu_expedited_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_expedited)); } static ssize_t rcu_expedited_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { if (kstrtoint(buf, 0, &rcu_expedited)) return -EINVAL; return count; } KERNEL_ATTR_RW(rcu_expedited); int rcu_normal; static ssize_t rcu_normal_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_normal)); } static ssize_t rcu_normal_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { if (kstrtoint(buf, 0, &rcu_normal)) return -EINVAL; return count; } KERNEL_ATTR_RW(rcu_normal); #endif /* #ifndef CONFIG_TINY_RCU */ /* * Make /sys/kernel/notes give the raw contents of our kernel .notes section. 
*/ extern const void __start_notes; extern const void __stop_notes; #define notes_size (&__stop_notes - &__start_notes) static __ro_after_init BIN_ATTR_SIMPLE_RO(notes); struct kobject *kernel_kobj; EXPORT_SYMBOL_GPL(kernel_kobj); static struct attribute * kernel_attrs[] = { &fscaps_attr.attr, &uevent_seqnum_attr.attr, &cpu_byteorder_attr.attr, &address_bits_attr.attr, #ifdef CONFIG_UEVENT_HELPER &uevent_helper_attr.attr, #endif #ifdef CONFIG_PROFILING &profiling_attr.attr, #endif #ifdef CONFIG_KEXEC_CORE &kexec_loaded_attr.attr, #ifdef CONFIG_CRASH_DUMP &kexec_crash_loaded_attr.attr, &kexec_crash_size_attr.attr, #endif #endif #ifdef CONFIG_VMCORE_INFO &vmcoreinfo_attr.attr, #ifdef CONFIG_CRASH_HOTPLUG &crash_elfcorehdr_size_attr.attr, #endif #endif #ifndef CONFIG_TINY_RCU &rcu_expedited_attr.attr, &rcu_normal_attr.attr, #endif NULL }; static const struct attribute_group kernel_attr_group = { .attrs = kernel_attrs, }; static int __init ksysfs_init(void) { int error; kernel_kobj = kobject_create_and_add("kernel", NULL); if (!kernel_kobj) { error = -ENOMEM; goto exit; } error = sysfs_create_group(kernel_kobj, &kernel_attr_group); if (error) goto kset_exit; if (notes_size > 0) { bin_attr_notes.private = (void *)&__start_notes; bin_attr_notes.size = notes_size; error = sysfs_create_bin_file(kernel_kobj, &bin_attr_notes); if (error) goto group_exit; } return 0; group_exit: sysfs_remove_group(kernel_kobj, &kernel_attr_group); kset_exit: kobject_put(kernel_kobj); exit: return error; } core_initcall(ksysfs_init);
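/*
 * Editor's note: a minimal, hypothetical sketch of how the pattern above is
 * extended with a new read-only entry under /sys/kernel: write a *_show()
 * helper that uses sysfs_emit(), declare the attribute with KERNEL_ATTR_RO(),
 * and add &<name>_attr.attr to kernel_attrs[]. The name example_value and the
 * constant it reports are invented for illustration.
 */
#if 0	/* illustrative sketch, not compiled */
static ssize_t example_value_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
KERNEL_ATTR_RO(example_value);
/* ...and &example_value_attr.attr would be added to kernel_attrs[] above. */
#endif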
890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 * * X86-64 port * Andi Kleen. * * CPU hotplug support - ashok.raj@intel.com */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/ptrace.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/prctl.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/ftrace.h> #include <linux/syscalls.h> #include <linux/iommu.h> #include <asm/processor.h> #include <asm/pkru.h> #include <asm/fpu/sched.h> #include <asm/mmu_context.h> #include <asm/prctl.h> #include <asm/desc.h> #include <asm/proto.h> #include <asm/ia32.h> #include <asm/debugreg.h> #include <asm/switch_to.h> #include <asm/xen/hypervisor.h> #include <asm/vdso.h> #include <asm/resctrl.h> #include <asm/unistd.h> #include <asm/fsgsbase.h> #include <asm/fred.h> #include <asm/msr.h> #ifdef CONFIG_IA32_EMULATION /* Not included via unistd.h */ #include <asm/unistd_32_ia32.h> #endif #include "process.h" /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode, const char *log_lvl) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; unsigned long d0, d1, d2, d3, d6, d7; unsigned int fsindex, gsindex; unsigned int ds, es; show_iret_regs(regs, log_lvl); if (regs->orig_ax != -1) pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); else pr_cont("\n"); printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n", log_lvl, regs->ax, regs->bx, regs->cx); printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n", log_lvl, regs->dx, regs->si, regs->di); printk("%sRBP: %016lx R08: %016lx R09: %016lx\n", log_lvl, regs->bp, regs->r8, regs->r9); printk("%sR10: %016lx R11: %016lx R12: %016lx\n", log_lvl, regs->r10, regs->r11, regs->r12); printk("%sR13: %016lx R14: %016lx R15: %016lx\n", log_lvl, regs->r13, regs->r14, regs->r15); if (mode == SHOW_REGS_SHORT) return; if (mode == SHOW_REGS_USER) { rdmsrq(MSR_FS_BASE, fs); rdmsrq(MSR_KERNEL_GS_BASE, shadowgs); printk("%sFS: %016lx GS: %016lx\n", log_lvl, fs, shadowgs); return; } asm("movl %%ds,%0" : "=r" (ds)); asm("movl %%es,%0" : "=r" (es)); asm("movl %%fs,%0" : "=r" (fsindex)); asm("movl %%gs,%0" : "=r" (gsindex)); rdmsrq(MSR_FS_BASE, fs); rdmsrq(MSR_GS_BASE, gs); rdmsrq(MSR_KERNEL_GS_BASE, shadowgs); cr0 = read_cr0(); cr2 = read_cr2(); cr3 = __read_cr3(); cr4 = __read_cr4(); printk("%sFS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", log_lvl, fs, fsindex, gs, gsindex, shadowgs); printk("%sCS: %04x DS: %04x ES: %04x CR0: %016lx\n", log_lvl, regs->cs, ds, es, cr0); printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n", log_lvl, cr2, cr3, cr4); 
get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); get_debugreg(d3, 3); get_debugreg(d6, 6); get_debugreg(d7, 7); /* Only print out debug registers if they are in their non-default state. */ if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && (d6 == DR6_RESERVED) && (d7 == DR7_FIXED_1))) { printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n", log_lvl, d0, d1, d2); printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n", log_lvl, d3, d6, d7); } if (cr4 & X86_CR4_PKE) printk("%sPKRU: %08x\n", log_lvl, read_pkru()); } void release_thread(struct task_struct *dead_task) { WARN_ON(dead_task->mm); } enum which_selector { FS, GS }; /* * Out of line to be protected from kprobes and tracing. If this would be * traced or probed than any access to a per CPU variable happens with * the wrong GS. * * It is not used on Xen paravirt. When paravirt support is needed, it * needs to be renamed with native_ prefix. */ static noinstr unsigned long __rdgsbase_inactive(void) { unsigned long gsbase; lockdep_assert_irqs_disabled(); /* * SWAPGS is no longer needed thus NOT allowed with FRED because * FRED transitions ensure that an operating system can _always_ * operate with its own GS base address: * - For events that occur in ring 3, FRED event delivery swaps * the GS base address with the IA32_KERNEL_GS_BASE MSR. * - ERETU (the FRED transition that returns to ring 3) also swaps * the GS base address with the IA32_KERNEL_GS_BASE MSR. * * And the operating system can still setup the GS segment for a * user thread without the need of loading a user thread GS with: * - Using LKGS, available with FRED, to modify other attributes * of the GS segment without compromising its ability always to * operate with its own GS base address. * - Accessing the GS segment base address for a user thread as * before using RDMSR or WRMSR on the IA32_KERNEL_GS_BASE MSR. * * Note, LKGS loads the GS base address into the IA32_KERNEL_GS_BASE * MSR instead of the GS segment’s descriptor cache. As such, the * operating system never changes its runtime GS base address. */ if (!cpu_feature_enabled(X86_FEATURE_FRED) && !cpu_feature_enabled(X86_FEATURE_XENPV)) { native_swapgs(); gsbase = rdgsbase(); native_swapgs(); } else { instrumentation_begin(); rdmsrq(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } return gsbase; } /* * Out of line to be protected from kprobes and tracing. If this would be * traced or probed than any access to a per CPU variable happens with * the wrong GS. * * It is not used on Xen paravirt. When paravirt support is needed, it * needs to be renamed with native_ prefix. */ static noinstr void __wrgsbase_inactive(unsigned long gsbase) { lockdep_assert_irqs_disabled(); if (!cpu_feature_enabled(X86_FEATURE_FRED) && !cpu_feature_enabled(X86_FEATURE_XENPV)) { native_swapgs(); wrgsbase(gsbase); native_swapgs(); } else { instrumentation_begin(); wrmsrq(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } } /* * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are * not available. The goal is to be reasonably fast on non-FSGSBASE systems. * It's forcibly inlined because it'll generate better code and this function * is hot. */ static __always_inline void save_base_legacy(struct task_struct *prev_p, unsigned short selector, enum which_selector which) { if (likely(selector == 0)) { /* * On Intel (without X86_BUG_NULL_SEG), the segment base could * be the pre-existing saved base or it could be zero. On AMD * (with X86_BUG_NULL_SEG), the segment base could be almost * anything. 
* * This branch is very hot (it's hit twice on almost every * context switch between 64-bit programs), and avoiding * the RDMSR helps a lot, so we just assume that whatever * value is already saved is correct. This matches historical * Linux behavior, so it won't break existing applications. * * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we * report that the base is zero, it needs to actually be zero: * see the corresponding logic in load_seg_legacy. */ } else { /* * If the selector is 1, 2, or 3, then the base is zero on * !X86_BUG_NULL_SEG CPUs and could be anything on * X86_BUG_NULL_SEG CPUs. In the latter case, Linux * has never attempted to preserve the base across context * switches. * * If selector > 3, then it refers to a real segment, and * saving the base isn't necessary. */ if (which == FS) prev_p->thread.fsbase = 0; else prev_p->thread.gsbase = 0; } } static __always_inline void save_fsgs(struct task_struct *task) { savesegment(fs, task->thread.fsindex); savesegment(gs, task->thread.gsindex); if (static_cpu_has(X86_FEATURE_FSGSBASE)) { /* * If FSGSBASE is enabled, we can't make any useful guesses * about the base, and user code expects us to save the current * value. Fortunately, reading the base directly is efficient. */ task->thread.fsbase = rdfsbase(); task->thread.gsbase = __rdgsbase_inactive(); } else { save_base_legacy(task, task->thread.fsindex, FS); save_base_legacy(task, task->thread.gsindex, GS); } } /* * While a process is running,current->thread.fsbase and current->thread.gsbase * may not match the corresponding CPU registers (see save_base_legacy()). */ void current_save_fsgs(void) { unsigned long flags; /* Interrupts need to be off for FSGSBASE */ local_irq_save(flags); save_fsgs(current); local_irq_restore(flags); } #if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL_GPL(current_save_fsgs); #endif static __always_inline void loadseg(enum which_selector which, unsigned short sel) { if (which == FS) loadsegment(fs, sel); else load_gs_index(sel); } static __always_inline void load_seg_legacy(unsigned short prev_index, unsigned long prev_base, unsigned short next_index, unsigned long next_base, enum which_selector which) { if (likely(next_index <= 3)) { /* * The next task is using 64-bit TLS, is not using this * segment at all, or is having fun with arcane CPU features. */ if (next_base == 0) { /* * Nasty case: on AMD CPUs, we need to forcibly zero * the base. */ if (static_cpu_has_bug(X86_BUG_NULL_SEG)) { loadseg(which, __USER_DS); loadseg(which, next_index); } else { /* * We could try to exhaustively detect cases * under which we can skip the segment load, * but there's really only one case that matters * for performance: if both the previous and * next states are fully zeroed, we can skip * the load. * * (This assumes that prev_base == 0 has no * false positives. This is the case on * Intel-style CPUs.) */ if (likely(prev_index | next_index | prev_base)) loadseg(which, next_index); } } else { if (prev_index != next_index) loadseg(which, next_index); wrmsrq(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE, next_base); } } else { /* * The next task is using a real segment. Loading the selector * is sufficient. */ loadseg(which, next_index); } } /* * Store prev's PKRU value and load next's PKRU value if they differ. PKRU * is not XSTATE managed on context switch because that would require a * lookup in the task's FPU xsave buffer and require to keep that updated * in various places. 
*/ static __always_inline void x86_pkru_load(struct thread_struct *prev, struct thread_struct *next) { if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return; /* Stash the prev task's value: */ prev->pkru = rdpkru(); /* * PKRU writes are slightly expensive. Avoid them when not * strictly necessary: */ if (prev->pkru != next->pkru) wrpkru(next->pkru); } static __always_inline void x86_fsgsbase_load(struct thread_struct *prev, struct thread_struct *next) { if (static_cpu_has(X86_FEATURE_FSGSBASE)) { /* Update the FS and GS selectors if they could have changed. */ if (unlikely(prev->fsindex || next->fsindex)) loadseg(FS, next->fsindex); if (unlikely(prev->gsindex || next->gsindex)) loadseg(GS, next->gsindex); /* Update the bases. */ wrfsbase(next->fsbase); __wrgsbase_inactive(next->gsbase); } else { load_seg_legacy(prev->fsindex, prev->fsbase, next->fsindex, next->fsbase, FS); load_seg_legacy(prev->gsindex, prev->gsbase, next->gsindex, next->gsbase, GS); } } unsigned long x86_fsgsbase_read_task(struct task_struct *task, unsigned short selector) { unsigned short idx = selector >> 3; unsigned long base; if (likely((selector & SEGMENT_TI_MASK) == 0)) { if (unlikely(idx >= GDT_ENTRIES)) return 0; /* * There are no user segments in the GDT with nonzero bases * other than the TLS segments. */ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return 0; idx -= GDT_ENTRY_TLS_MIN; base = get_desc_base(&task->thread.tls_array[idx]); } else { #ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; /* * If performance here mattered, we could protect the LDT * with RCU. This is a slow path, though, so we can just * take the mutex. */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); mutex_unlock(&task->mm->context.lock); #else base = 0; #endif } return base; } unsigned long x86_gsbase_read_cpu_inactive(void) { unsigned long gsbase; if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); gsbase = __rdgsbase_inactive(); local_irq_restore(flags); } else { rdmsrq(MSR_KERNEL_GS_BASE, gsbase); } return gsbase; } void x86_gsbase_write_cpu_inactive(unsigned long gsbase) { if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); __wrgsbase_inactive(gsbase); local_irq_restore(flags); } else { wrmsrq(MSR_KERNEL_GS_BASE, gsbase); } } unsigned long x86_fsbase_read_task(struct task_struct *task) { unsigned long fsbase; if (task == current) fsbase = x86_fsbase_read_cpu(); else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.fsindex == 0)) fsbase = task->thread.fsbase; else fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex); return fsbase; } unsigned long x86_gsbase_read_task(struct task_struct *task) { unsigned long gsbase; if (task == current) gsbase = x86_gsbase_read_cpu_inactive(); else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.gsindex == 0)) gsbase = task->thread.gsbase; else gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex); return gsbase; } void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase) { WARN_ON_ONCE(task == current); task->thread.fsbase = fsbase; } void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) { WARN_ON_ONCE(task == current); task->thread.gsbase = gsbase; } static void start_thread_common(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp, u16 _cs, u16 _ss, u16 _ds) { WARN_ON_ONCE(regs != current_pt_regs()); if 
(static_cpu_has(X86_BUG_NULL_SEG)) { /* Loading zero below won't clear the base. */ loadsegment(fs, __USER_DS); load_gs_index(__USER_DS); } reset_thread_features(); loadsegment(fs, 0); loadsegment(es, _ds); loadsegment(ds, _ds); load_gs_index(0); regs->ip = new_ip; regs->sp = new_sp; regs->csx = _cs; regs->ssx = _ss; /* * Allow single-step trap and NMI when starting a new task, thus * once the new task enters user space, single-step trap and NMI * are both enabled immediately. * * Entering a new task is logically speaking a return from a * system call (exec, fork, clone, etc.). As such, if ptrace * enables single stepping a single step exception should be * allowed to trigger immediately upon entering user space. * This is not optional. * * NMI should *never* be disabled in user space. As such, this * is an optional, opportunistic way to catch errors. * * Paranoia: High-order 48 bits above the lowest 16 bit SS are * discarded by the legacy IRET instruction on all Intel, AMD, * and Cyrix/Centaur/VIA CPUs, thus can be set unconditionally, * even when FRED is not enabled. But we choose the safer side * to use these bits only when FRED is enabled. */ if (cpu_feature_enabled(X86_FEATURE_FRED)) { regs->fred_ss.swevent = true; regs->fred_ss.nmi = true; } regs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { start_thread_common(regs, new_ip, new_sp, __USER_CS, __USER_DS, 0); } EXPORT_SYMBOL_GPL(start_thread); #ifdef CONFIG_COMPAT void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32) { start_thread_common(regs, new_ip, new_sp, x32 ? __USER_CS : __USER32_CS, __USER_DS, __USER_DS); } #endif /* * switch_to(x,y) should switch tasks from x to y. * * This could still be optimized: * - fold all the options into a flag word and test it with a single test. * - could test fs/gs bitsliced * * Kprobes not supported here. Set the probe on schedule instead. * Function graph tracer not supported too. */ __no_kmsan_checks __visible __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(hardirq_stack_inuse)); switch_fpu(prev_p, cpu); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). * * (e.g. xen_load_tls()) */ save_fsgs(prev_p); /* * Load TLS before restoring any segments so that segment loads * reference the correct GDT entries. */ load_TLS(next, cpu); /* * Leave lazy mode, flushing any hypercalls made here. This * must be done after loading TLS entries in the GDT but before * loading segments that might reference them. */ arch_end_context_switch(next_p); /* Switch DS and ES. * * Reading them only returns the selectors, but writing them (if * nonzero) loads the full descriptor from the GDT or LDT. The * LDT for next is loaded in switch_mm, and the GDT is loaded * above. * * We therefore need to write new values to the segment * registers on every context switch unless both the new and old * values are zero. * * Note that we don't need to do anything for CS and SS, as * those are saved and restored as part of pt_regs. 
*/ savesegment(es, prev->es); if (unlikely(next->es | prev->es)) loadsegment(es, next->es); savesegment(ds, prev->ds); if (unlikely(next->ds | prev->ds)) loadsegment(ds, next->ds); x86_fsgsbase_load(prev, next); x86_pkru_load(prev, next); /* * Switch the PDA and FPU contexts. */ raw_cpu_write(current_task, next_p); raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); /* Reload sp0. */ update_task_stack(next_p); switch_to_extra(prev_p, next_p); if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) { /* * AMD CPUs have a misfeature: SYSRET sets the SS selector but * does not update the cached descriptor. As a result, if we * do SYSRET while SS is NULL, we'll end up in user mode with * SS apparently equal to __USER_DS but actually unusable. * * The straightforward workaround would be to fix it up just * before SYSRET, but that would slow down the system call * fast paths. Instead, we ensure that SS is never NULL in * system call context. We do this by replacing NULL SS * selectors at every context switch. SYSCALL sets up a valid * SS, so the only way to get NULL is to re-enter the kernel * from CPL 3 through an interrupt. Since that can't happen * in the same task as a running syscall, we are guaranteed to * context switch between every interrupt vector entry and a * subsequent SYSRET. * * We read SS first because SS reads are much faster than * writes. Out of caution, we force SS to __KERNEL_DS even if * it previously had a different non-NULL value. */ unsigned short ss_sel; savesegment(ss, ss_sel); if (ss_sel != __KERNEL_DS) loadsegment(ss, __KERNEL_DS); } /* Load the Intel cache allocation PQR MSR. */ resctrl_arch_sched_in(next_p); /* Reset hw history on AMD CPUs */ if (cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS)) wrmsrl(MSR_AMD_WORKLOAD_HRST, 0x1); return prev_p; } void set_personality_64bit(void) { /* inherit personality from parent */ /* Make sure to be in 64bit mode */ clear_thread_flag(TIF_ADDR32); /* Pretend that this comes from a 64bit execve */ task_pt_regs(current)->orig_ax = __NR_execve; current_thread_info()->status &= ~TS_COMPAT; if (current->mm) __set_bit(MM_CONTEXT_HAS_VSYSCALL, ¤t->mm->context.flags); /* TBD: overwrites user setup. Should have two bits. But 64bit processes have always behaved this way, so it's not too bad. The main problem is just that 32bit children are affected again. */ current->personality &= ~READ_IMPLIES_EXEC; } static void __set_personality_x32(void) { #ifdef CONFIG_X86_X32_ABI if (current->mm) current->mm->context.flags = 0; current->personality &= ~READ_IMPLIES_EXEC; /* * in_32bit_syscall() uses the presence of the x32 syscall bit * flag to determine compat status. The x86 mmap() code relies on * the syscall bitness so set x32 syscall bit right here to make * in_32bit_syscall() work during exec(). * * Pretend to come from a x32 execve. */ task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; current_thread_info()->status &= ~TS_COMPAT; #endif } static void __set_personality_ia32(void) { #ifdef CONFIG_IA32_EMULATION if (current->mm) { /* * uprobes applied to this MM need to know this and * cannot use user_64bit_mode() at that time. 
*/ __set_bit(MM_CONTEXT_UPROBE_IA32, ¤t->mm->context.flags); } current->personality |= force_personality32; /* Prepare the first "return" to user space */ task_pt_regs(current)->orig_ax = __NR_ia32_execve; current_thread_info()->status |= TS_COMPAT; #endif } void set_personality_ia32(bool x32) { /* Make sure to be in 32bit mode */ set_thread_flag(TIF_ADDR32); if (x32) __set_personality_x32(); else __set_personality_ia32(); } EXPORT_SYMBOL_GPL(set_personality_ia32); #ifdef CONFIG_CHECKPOINT_RESTORE static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) { int ret; ret = map_vdso_once(image, addr); if (ret) return ret; return (long)image->size; } #endif #ifdef CONFIG_ADDRESS_MASKING #define LAM_U57_BITS 6 static void enable_lam_func(void *__mm) { struct mm_struct *mm = __mm; unsigned long lam; if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) { lam = mm_lam_cr3_mask(mm); write_cr3(__read_cr3() | lam); cpu_tlbstate_update_lam(lam, mm_untag_mask(mm)); } } static void mm_enable_lam(struct mm_struct *mm) { mm->context.lam_cr3_mask = X86_CR3_LAM_U57; mm->context.untag_mask = ~GENMASK(62, 57); /* * Even though the process must still be single-threaded at this * point, kernel threads may be using the mm. IPI those kernel * threads if they exist. */ on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true); set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); } static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) { if (!cpu_feature_enabled(X86_FEATURE_LAM)) return -ENODEV; /* PTRACE_ARCH_PRCTL */ if (current->mm != mm) return -EINVAL; if (mm_valid_pasid(mm) && !test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags)) return -EINVAL; if (mmap_write_lock_killable(mm)) return -EINTR; /* * MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from * being enabled unless the process is single threaded: */ if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { mmap_write_unlock(mm); return -EBUSY; } if (!nr_bits || nr_bits > LAM_U57_BITS) { mmap_write_unlock(mm); return -EINVAL; } mm_enable_lam(mm); mmap_write_unlock(mm); return 0; } #endif long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) { int ret = 0; switch (option) { case ARCH_SET_GS: { if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; preempt_disable(); /* * ARCH_SET_GS has always overwritten the index * and the base. Zero is the most sensible value * to put in the index, and is the only value that * makes any sense if FSGSBASE is unavailable. */ if (task == current) { loadseg(GS, 0); x86_gsbase_write_cpu_inactive(arg2); /* * On non-FSGSBASE systems, save_base_legacy() expects * that we also fill in thread.gsbase. */ task->thread.gsbase = arg2; } else { task->thread.gsindex = 0; x86_gsbase_write_task(task, arg2); } preempt_enable(); break; } case ARCH_SET_FS: { /* * Not strictly needed for %fs, but do it for symmetry * with %gs */ if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; preempt_disable(); /* * Set the selector to 0 for the same reason * as %gs above. */ if (task == current) { loadseg(FS, 0); x86_fsbase_write_cpu(arg2); /* * On non-FSGSBASE systems, save_base_legacy() expects * that we also fill in thread.fsbase. 
*/ task->thread.fsbase = arg2; } else { task->thread.fsindex = 0; x86_fsbase_write_task(task, arg2); } preempt_enable(); break; } case ARCH_GET_FS: { unsigned long base = x86_fsbase_read_task(task); ret = put_user(base, (unsigned long __user *)arg2); break; } case ARCH_GET_GS: { unsigned long base = x86_gsbase_read_task(task); ret = put_user(base, (unsigned long __user *)arg2); break; } #ifdef CONFIG_CHECKPOINT_RESTORE # ifdef CONFIG_X86_X32_ABI case ARCH_MAP_VDSO_X32: return prctl_map_vdso(&vdso_image_x32, arg2); # endif # ifdef CONFIG_IA32_EMULATION case ARCH_MAP_VDSO_32: return prctl_map_vdso(&vdso_image_32, arg2); # endif case ARCH_MAP_VDSO_64: return prctl_map_vdso(&vdso_image_64, arg2); #endif #ifdef CONFIG_ADDRESS_MASKING case ARCH_GET_UNTAG_MASK: return put_user(task->mm->context.untag_mask, (unsigned long __user *)arg2); case ARCH_ENABLE_TAGGED_ADDR: return prctl_enable_tagged_addr(task->mm, arg2); case ARCH_FORCE_TAGGED_SVA: if (current != task) return -EINVAL; set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags); return 0; case ARCH_GET_MAX_TAG_BITS: if (!cpu_feature_enabled(X86_FEATURE_LAM)) return put_user(0, (unsigned long __user *)arg2); else return put_user(LAM_U57_BITS, (unsigned long __user *)arg2); #endif case ARCH_SHSTK_ENABLE: case ARCH_SHSTK_DISABLE: case ARCH_SHSTK_LOCK: case ARCH_SHSTK_UNLOCK: case ARCH_SHSTK_STATUS: return shstk_prctl(task, option, arg2); default: ret = -EINVAL; break; } return ret; } |
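The do_arch_prctl_64() switch above is the kernel-side handler behind arch_prctl(2) requests such as ARCH_SET_GS and ARCH_GET_GS. For orientation only, here is a minimal userspace sketch of the calling side; it is not part of the kernel sources, and the direct syscall() invocation and the dummy_tls buffer are illustrative assumptions about how a runtime might point GS at a per-thread area.

/*
 * Userspace illustration (not kernel code): drive the ARCH_SET_GS and
 * ARCH_GET_GS cases handled by do_arch_prctl_64(). Build on x86-64 Linux.
 */
#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_GS */
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint64_t dummy_tls[16];	/* stand-in for a per-thread data block */

int main(void)
{
	uint64_t base = 0;

	/* The kernel zeroes the GS selector and installs the new base. */
	if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)dummy_tls))
		perror("ARCH_SET_GS");

	/* The kernel copies the saved base back through the user pointer. */
	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base))
		perror("ARCH_GET_GS");

	printf("GS base: %#llx (expected %#llx)\n",
	       (unsigned long long)base,
	       (unsigned long long)(uintptr_t)dummy_tls);
	return 0;
}

On CPUs and kernels where FSGSBASE is exposed to userspace, the base could also be written directly with the WRGSBASE instruction; the arch_prctl() path shown here works in either case.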
// SPDX-License-Identifier: GPL-2.0-only /* * IPv6 library code, needed by static components when full IPv6 support is * not configured or static. */ #include <linux/export.h> #include <net/ipv6.h> #include <net/ipv6_stubs.h> #include <net/addrconf.h> #include <net/ip.h> /* If the ipv6 module registers this function, it is used by xfrm to force all * sockets to relookup their nodes - this is fairly expensive, be * careful */ void (*__fib6_flush_trees)(struct net *); EXPORT_SYMBOL(__fib6_flush_trees); #define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) static inline unsigned int ipv6_addr_scope2type(unsigned int scope) { switch (scope) { case IPV6_ADDR_SCOPE_NODELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) | IPV6_ADDR_LOOPBACK); case IPV6_ADDR_SCOPE_LINKLOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL) | IPV6_ADDR_LINKLOCAL); case IPV6_ADDR_SCOPE_SITELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL) | IPV6_ADDR_SITELOCAL); } return IPV6_ADDR_SCOPE_TYPE(scope); } int __ipv6_addr_type(const struct in6_addr *addr) { __be32 st; st = addr->s6_addr32[0]; /* Consider all addresses with the first three bits different from 000 and 111 as unicasts.
*/ if ((st & htonl(0xE0000000)) != htonl(0x00000000) && (st & htonl(0xE0000000)) != htonl(0xE0000000)) return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) { /* multicast */ /* addr-select 3.1 */ return (IPV6_ADDR_MULTICAST | ipv6_addr_scope2type(IPV6_ADDR_MC_SCOPE(addr))); } if ((st & htonl(0xFFC00000)) == htonl(0xFE800000)) return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.1 */ if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000)) return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL)); /* addr-select 3.1 */ if ((st & htonl(0xFE000000)) == htonl(0xFC000000)) return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* RFC 4193 */ if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) { if (addr->s6_addr32[2] == 0) { if (addr->s6_addr32[3] == 0) return IPV6_ADDR_ANY; if (addr->s6_addr32[3] == htonl(0x00000001)) return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.4 */ return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */ } if (addr->s6_addr32[2] == htonl(0x0000ffff)) return (IPV6_ADDR_MAPPED | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */ } return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */ } EXPORT_SYMBOL(__ipv6_addr_type); static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); static BLOCKING_NOTIFIER_HEAD(inet6addr_validator_chain); int register_inet6addr_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&inet6addr_chain, nb); } EXPORT_SYMBOL(register_inet6addr_notifier); int unregister_inet6addr_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&inet6addr_chain, nb); } EXPORT_SYMBOL(unregister_inet6addr_notifier); int inet6addr_notifier_call_chain(unsigned long val, void *v) { return atomic_notifier_call_chain(&inet6addr_chain, val, v); } EXPORT_SYMBOL(inet6addr_notifier_call_chain); int register_inet6addr_validator_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&inet6addr_validator_chain, nb); } EXPORT_SYMBOL(register_inet6addr_validator_notifier); int unregister_inet6addr_validator_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&inet6addr_validator_chain, nb); } EXPORT_SYMBOL(unregister_inet6addr_validator_notifier); int inet6addr_validator_notifier_call_chain(unsigned long val, void *v) { return blocking_notifier_call_chain(&inet6addr_validator_chain, val, v); } EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain); static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst) { return ERR_PTR(-EAFNOSUPPORT); } static int eafnosupport_ipv6_route_input(struct sk_buff *skb) { return -EAFNOSUPPORT; } static struct fib6_table *eafnosupport_fib6_get_table(struct net *net, u32 id) { return NULL; } static int eafnosupport_fib6_table_lookup(struct net *net, struct fib6_table *table, int oif, struct flowi6 *fl6, struct fib6_result *res, int flags) { return -EAFNOSUPPORT; } static int eafnosupport_fib6_lookup(struct net *net, int oif, struct flowi6 *fl6, struct fib6_result *res, int flags) { return -EAFNOSUPPORT; } static void eafnosupport_fib6_select_path(const struct net *net, struct fib6_result *res, struct 
flowi6 *fl6, int oif, bool have_oif_match, const struct sk_buff *skb, int strict) { } static u32 eafnosupport_ip6_mtu_from_fib6(const struct fib6_result *res, const struct in6_addr *daddr, const struct in6_addr *saddr) { return 0; } static int eafnosupport_fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel"); return -EAFNOSUPPORT; } static int eafnosupport_ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify) { return -EAFNOSUPPORT; } static int eafnosupport_ipv6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)) { kfree_skb(skb); return -EAFNOSUPPORT; } static struct net_device *eafnosupport_ipv6_dev_find(struct net *net, const struct in6_addr *addr, struct net_device *dev) { return ERR_PTR(-EAFNOSUPPORT); } const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, .ipv6_route_input = eafnosupport_ipv6_route_input, .fib6_get_table = eafnosupport_fib6_get_table, .fib6_table_lookup = eafnosupport_fib6_table_lookup, .fib6_lookup = eafnosupport_fib6_lookup, .fib6_select_path = eafnosupport_fib6_select_path, .ip6_mtu_from_fib6 = eafnosupport_ip6_mtu_from_fib6, .fib6_nh_init = eafnosupport_fib6_nh_init, .ip6_del_rt = eafnosupport_ip6_del_rt, .ipv6_fragment = eafnosupport_ipv6_fragment, .ipv6_dev_find = eafnosupport_ipv6_dev_find, }; EXPORT_SYMBOL_GPL(ipv6_stub); /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ const struct in6_addr in6addr_loopback __aligned(BITS_PER_LONG/8) = IN6ADDR_LOOPBACK_INIT; EXPORT_SYMBOL(in6addr_loopback); const struct in6_addr in6addr_any __aligned(BITS_PER_LONG/8) = IN6ADDR_ANY_INIT; EXPORT_SYMBOL(in6addr_any); const struct in6_addr in6addr_linklocal_allnodes __aligned(BITS_PER_LONG/8) = IN6ADDR_LINKLOCAL_ALLNODES_INIT; EXPORT_SYMBOL(in6addr_linklocal_allnodes); const struct in6_addr in6addr_linklocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_linklocal_allrouters); const struct in6_addr in6addr_interfacelocal_allnodes __aligned(BITS_PER_LONG/8) = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT; EXPORT_SYMBOL(in6addr_interfacelocal_allnodes); const struct in6_addr in6addr_interfacelocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_interfacelocal_allrouters); const struct in6_addr in6addr_sitelocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_SITELOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_sitelocal_allrouters); static void snmp6_free_dev(struct inet6_dev *idev) { kfree(idev->stats.icmpv6msgdev); kfree(idev->stats.icmpv6dev); free_percpu(idev->stats.ipv6); } static void in6_dev_finish_destroy_rcu(struct rcu_head *head) { struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu); snmp6_free_dev(idev); kfree(idev); } /* Nobody refers to this device, we may destroy it. */ void in6_dev_finish_destroy(struct inet6_dev *idev) { struct net_device *dev = idev->dev; WARN_ON(!list_empty(&idev->addr_list)); WARN_ON(rcu_access_pointer(idev->mc_list)); WARN_ON(timer_pending(&idev->rs_timer)); #ifdef NET_REFCNT_DEBUG pr_debug("%s: %s\n", __func__, dev ? 
dev->name : "NIL"); #endif netdev_put(dev, &idev->dev_tracker); if (!idev->dead) { pr_warn("Freeing alive inet6 device %p\n", idev); return; } call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu); } EXPORT_SYMBOL(in6_dev_finish_destroy);
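For context, here is a minimal sketch of how the flag-style result of __ipv6_addr_type() is normally consumed. The helper below is hypothetical (it is not part of addrconf_core.c) and assumes the usual definitions from <net/ipv6.h> and <linux/types.h>.

/*
 * Hypothetical consumer of __ipv6_addr_type() -- illustration only, not part
 * of this file. The return value is a bit mask, so callers test flags such as
 * IPV6_ADDR_LINKLOCAL or IPV6_ADDR_MULTICAST rather than comparing for
 * exact values.
 */
#include <linux/types.h>
#include <net/ipv6.h>

static bool example_needs_scope_id(const struct in6_addr *addr)
{
	int type = __ipv6_addr_type(addr);

	/* Link-local unicast is only meaningful together with an interface. */
	if (type & IPV6_ADDR_LINKLOCAL)
		return true;

	/* Likewise for multicast whose scope is link-local or narrower. */
	if ((type & IPV6_ADDR_MULTICAST) &&
	    IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
		return true;

	return false;
}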
2200 2201 2202 2203 2204 | // SPDX-License-Identifier: GPL-2.0 /* * linux/mm/madvise.c * * Copyright (C) 1999 Linus Torvalds * Copyright (C) 2002 Christoph Hellwig */ #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/mempolicy.h> #include <linux/page-isolation.h> #include <linux/page_idle.h> #include <linux/userfaultfd_k.h> #include <linux/hugetlb.h> #include <linux/falloc.h> #include <linux/fadvise.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/mm_inline.h> #include <linux/mmu_context.h> #include <linux/string.h> #include <linux/uio.h> #include <linux/ksm.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/pagewalk.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/shmem_fs.h> #include <linux/mmu_notifier.h> #include <asm/tlb.h> #include "internal.h" #include "swap.h" #define __MADV_SET_ANON_VMA_NAME (-1) /* * Maximum number of attempts we make to install guard pages before we give up * and return -ERESTARTNOINTR to have userspace try again. */ #define MAX_MADVISE_GUARD_RETRIES 3 struct madvise_walk_private { struct mmu_gather *tlb; bool pageout; }; enum madvise_lock_mode { MADVISE_NO_LOCK, MADVISE_MMAP_READ_LOCK, MADVISE_MMAP_WRITE_LOCK, MADVISE_VMA_READ_LOCK, }; struct madvise_behavior_range { unsigned long start; unsigned long end; }; struct madvise_behavior { struct mm_struct *mm; int behavior; struct mmu_gather *tlb; enum madvise_lock_mode lock_mode; struct anon_vma_name *anon_name; /* * The range over which the behaviour is currently being applied. If * traversing multiple VMAs, this is updated for each. */ struct madvise_behavior_range range; /* The VMA and VMA preceding it (if applicable) currently targeted. */ struct vm_area_struct *prev; struct vm_area_struct *vma; bool lock_dropped; }; #ifdef CONFIG_ANON_VMA_NAME static int madvise_walk_vmas(struct madvise_behavior *madv_behavior); struct anon_vma_name *anon_vma_name_alloc(const char *name) { struct anon_vma_name *anon_name; size_t count; /* Add 1 for NUL terminator at the end of the anon_name->name */ count = strlen(name) + 1; anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL); if (anon_name) { kref_init(&anon_name->kref); memcpy(anon_name->name, name, count); } return anon_name; } void anon_vma_name_free(struct kref *kref) { struct anon_vma_name *anon_name = container_of(kref, struct anon_vma_name, kref); kfree(anon_name); } struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) { if (!rwsem_is_locked(&vma->vm_mm->mmap_lock)) vma_assert_locked(vma); return vma->anon_name; } /* mmap_lock should be write-locked */ static int replace_anon_vma_name(struct vm_area_struct *vma, struct anon_vma_name *anon_name) { struct anon_vma_name *orig_name = anon_vma_name(vma); if (!anon_name) { vma->anon_name = NULL; anon_vma_name_put(orig_name); return 0; } if (anon_vma_name_eq(orig_name, anon_name)) return 0; vma->anon_name = anon_vma_name_reuse(anon_name); anon_vma_name_put(orig_name); return 0; } #else /* CONFIG_ANON_VMA_NAME */ static int replace_anon_vma_name(struct vm_area_struct *vma, struct anon_vma_name *anon_name) { if (anon_name) return -EINVAL; return 0; } #endif /* CONFIG_ANON_VMA_NAME */ /* * Update the vm_flags or anon_name on region of a vma, splitting it or merging * it as necessary. Must be called with mmap_lock held for writing. 
*/ static int madvise_update_vma(vm_flags_t new_flags, struct madvise_behavior *madv_behavior) { struct vm_area_struct *vma = madv_behavior->vma; struct madvise_behavior_range *range = &madv_behavior->range; struct anon_vma_name *anon_name = madv_behavior->anon_name; bool set_new_anon_name = madv_behavior->behavior == __MADV_SET_ANON_VMA_NAME; VMA_ITERATOR(vmi, madv_behavior->mm, range->start); if (new_flags == vma->vm_flags && (!set_new_anon_name || anon_vma_name_eq(anon_vma_name(vma), anon_name))) return 0; if (set_new_anon_name) vma = vma_modify_name(&vmi, madv_behavior->prev, vma, range->start, range->end, anon_name); else vma = vma_modify_flags(&vmi, madv_behavior->prev, vma, range->start, range->end, new_flags); if (IS_ERR(vma)) return PTR_ERR(vma); madv_behavior->vma = vma; /* vm_flags is protected by the mmap_lock held in write mode. */ vma_start_write(vma); vm_flags_reset(vma, new_flags); if (set_new_anon_name) return replace_anon_vma_name(vma, anon_name); return 0; } #ifdef CONFIG_SWAP static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; struct swap_iocb *splug = NULL; pte_t *ptep = NULL; spinlock_t *ptl; unsigned long addr; for (addr = start; addr < end; addr += PAGE_SIZE) { pte_t pte; swp_entry_t entry; struct folio *folio; if (!ptep++) { ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (!ptep) break; } pte = ptep_get(ptep); if (!is_swap_pte(pte)) continue; entry = pte_to_swp_entry(pte); if (unlikely(non_swap_entry(entry))) continue; pte_unmap_unlock(ptep, ptl); ptep = NULL; folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, vma, addr, &splug); if (folio) folio_put(folio); } if (ptep) pte_unmap_unlock(ptep, ptl); swap_read_unplug(splug); cond_resched(); return 0; } static const struct mm_walk_ops swapin_walk_ops = { .pmd_entry = swapin_walk_pmd_entry, .walk_lock = PGWALK_RDLOCK, }; static void shmem_swapin_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) { XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); pgoff_t end_index = linear_page_index(vma, end) - 1; struct folio *folio; struct swap_iocb *splug = NULL; rcu_read_lock(); xas_for_each(&xas, folio, end_index) { unsigned long addr; swp_entry_t entry; if (!xa_is_value(folio)) continue; entry = radix_to_swp_entry(folio); /* There might be swapin error entries in shmem mapping. */ if (non_swap_entry(entry)) continue; addr = vma->vm_start + ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT); xas_pause(&xas); rcu_read_unlock(); folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping), vma, addr, &splug); if (folio) folio_put(folio); rcu_read_lock(); } rcu_read_unlock(); swap_read_unplug(splug); } #endif /* CONFIG_SWAP */ static void mark_mmap_lock_dropped(struct madvise_behavior *madv_behavior) { VM_WARN_ON_ONCE(madv_behavior->lock_mode == MADVISE_VMA_READ_LOCK); madv_behavior->lock_dropped = true; } /* * Schedule all required I/O operations. Do not wait for completion. 
*/ static long madvise_willneed(struct madvise_behavior *madv_behavior) { struct vm_area_struct *vma = madv_behavior->vma; struct mm_struct *mm = madv_behavior->mm; struct file *file = vma->vm_file; unsigned long start = madv_behavior->range.start; unsigned long end = madv_behavior->range.end; loff_t offset; #ifdef CONFIG_SWAP if (!file) { walk_page_range_vma(vma, start, end, &swapin_walk_ops, vma); lru_add_drain(); /* Push any new pages onto the LRU now */ return 0; } if (shmem_mapping(file->f_mapping)) { shmem_swapin_range(vma, start, end, file->f_mapping); lru_add_drain(); /* Push any new pages onto the LRU now */ return 0; } #else if (!file) return -EBADF; #endif if (IS_DAX(file_inode(file))) { /* no bad return value, but ignore advice */ return 0; } /* * Filesystem's fadvise may need to take various locks. We need to * explicitly grab a reference because the vma (and hence the * vma's reference to the file) can go away as soon as we drop * mmap_lock. */ mark_mmap_lock_dropped(madv_behavior); get_file(file); offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); mmap_read_unlock(mm); vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); fput(file); mmap_read_lock(mm); return 0; } static inline bool can_do_file_pageout(struct vm_area_struct *vma) { if (!vma->vm_file) return false; /* * paging out pagecache only for non-anonymous mappings that correspond * to the files the calling process could (if tried) open for writing; * otherwise we'd be including shared non-exclusive mappings, which * opens a side channel. */ return inode_owner_or_capable(&nop_mnt_idmap, file_inode(vma->vm_file)) || file_permission(vma->vm_file, MAY_WRITE) == 0; } static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end, struct folio *folio, pte_t *ptep, pte_t *ptentp) { int max_nr = (end - addr) / PAGE_SIZE; return folio_pte_batch_flags(folio, NULL, ptep, ptentp, max_nr, FPB_MERGE_YOUNG_DIRTY); } static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct madvise_walk_private *private = walk->private; struct mmu_gather *tlb = private->tlb; bool pageout = private->pageout; struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; pte_t *start_pte, *pte, ptent; spinlock_t *ptl; struct folio *folio = NULL; LIST_HEAD(folio_list); bool pageout_anon_only_filter; unsigned int batch_count = 0; int nr; if (fatal_signal_pending(current)) return -EINTR; pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) && !can_do_file_pageout(vma); #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pmd_trans_huge(*pmd)) { pmd_t orig_pmd; unsigned long next = pmd_addr_end(addr, end); tlb_change_page_size(tlb, HPAGE_PMD_SIZE); ptl = pmd_trans_huge_lock(pmd, vma); if (!ptl) return 0; orig_pmd = *pmd; if (is_huge_zero_pmd(orig_pmd)) goto huge_unlock; if (unlikely(!pmd_present(orig_pmd))) { VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(orig_pmd)); goto huge_unlock; } folio = pmd_folio(orig_pmd); /* Do not interfere with other mappings of this folio */ if (folio_maybe_mapped_shared(folio)) goto huge_unlock; if (pageout_anon_only_filter && !folio_test_anon(folio)) goto huge_unlock; if (next - addr != HPAGE_PMD_SIZE) { int err; folio_get(folio); spin_unlock(ptl); folio_lock(folio); err = split_folio(folio); folio_unlock(folio); folio_put(folio); if (!err) goto regular_folio; return 0; } if (!pageout && pmd_young(orig_pmd)) { pmdp_invalidate(vma, addr, pmd); orig_pmd = pmd_mkold(orig_pmd); 
set_pmd_at(mm, addr, pmd, orig_pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); } folio_clear_referenced(folio); folio_test_clear_young(folio); if (folio_test_active(folio)) folio_set_workingset(folio); if (pageout) { if (folio_isolate_lru(folio)) { if (folio_test_unevictable(folio)) folio_putback_lru(folio); else list_add(&folio->lru, &folio_list); } } else folio_deactivate(folio); huge_unlock: spin_unlock(ptl); if (pageout) reclaim_pages(&folio_list); return 0; } regular_folio: #endif tlb_change_page_size(tlb, PAGE_SIZE); restart: start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (!start_pte) return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) { nr = 1; ptent = ptep_get(pte); if (++batch_count == SWAP_CLUSTER_MAX) { batch_count = 0; if (need_resched()) { arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); cond_resched(); goto restart; } } if (pte_none(ptent)) continue; if (!pte_present(ptent)) continue; folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; /* * If we encounter a large folio, only split it if it is not * fully mapped within the range we are operating on. Otherwise * leave it as is so that it can be swapped out whole. If we * fail to split a folio, leave it in place and advance to the * next pte in the range. */ if (folio_test_large(folio)) { nr = madvise_folio_pte_batch(addr, end, folio, pte, &ptent); if (nr < folio_nr_pages(folio)) { int err; if (folio_maybe_mapped_shared(folio)) continue; if (pageout_anon_only_filter && !folio_test_anon(folio)) continue; if (!folio_trylock(folio)) continue; folio_get(folio); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); start_pte = NULL; err = split_folio(folio); folio_unlock(folio); folio_put(folio); start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!start_pte) break; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); if (!err) nr = 0; continue; } } /* * Do not interfere with other mappings of this folio and * non-LRU folio. If we have a large folio at this point, we * know it is fully mapped so if its mapcount is the same as its * number of pages, it must be exclusive. */ if (!folio_test_lru(folio) || folio_mapcount(folio) != folio_nr_pages(folio)) continue; if (pageout_anon_only_filter && !folio_test_anon(folio)) continue; if (!pageout && pte_young(ptent)) { clear_young_dirty_ptes(vma, addr, pte, nr, CYDP_CLEAR_YOUNG); tlb_remove_tlb_entries(tlb, pte, nr, addr); } /* * We are deactivating a folio for accelerating reclaiming. * VM couldn't reclaim the folio unless we clear PG_young. * As a side effect, it makes confuse idle-page tracking * because they will miss recent referenced history. 
*/ folio_clear_referenced(folio); folio_test_clear_young(folio); if (folio_test_active(folio)) folio_set_workingset(folio); if (pageout) { if (folio_isolate_lru(folio)) { if (folio_test_unevictable(folio)) folio_putback_lru(folio); else list_add(&folio->lru, &folio_list); } } else folio_deactivate(folio); } if (start_pte) { arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); } if (pageout) reclaim_pages(&folio_list); cond_resched(); return 0; } static const struct mm_walk_ops cold_walk_ops = { .pmd_entry = madvise_cold_or_pageout_pte_range, .walk_lock = PGWALK_RDLOCK, }; static void madvise_cold_page_range(struct mmu_gather *tlb, struct madvise_behavior *madv_behavior) { struct vm_area_struct *vma = madv_behavior->vma; struct madvise_behavior_range *range = &madv_behavior->range; struct madvise_walk_private walk_private = { .pageout = false, .tlb = tlb, }; tlb_start_vma(tlb, vma); walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops, &walk_private); tlb_end_vma(tlb, vma); } static inline bool can_madv_lru_vma(struct vm_area_struct *vma) { return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB)); } static long madvise_cold(struct madvise_behavior *madv_behavior) { struct vm_area_struct *vma = madv_behavior->vma; struct mmu_gather tlb; if (!can_madv_lru_vma(vma)) return -EINVAL; lru_add_drain(); tlb_gather_mmu(&tlb, madv_behavior->mm); madvise_cold_page_range(&tlb, madv_behavior); tlb_finish_mmu(&tlb); return 0; } static void madvise_pageout_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, struct madvise_behavior_range *range) { struct madvise_walk_private walk_private = { .pageout = true, .tlb = tlb, }; tlb_start_vma(tlb, vma); walk_page_range_vma(vma, range->start, range->end, &cold_walk_ops, &walk_private); tlb_end_vma(tlb, vma); } static long madvise_pageout(struct madvise_behavior *madv_behavior) { struct mmu_gather tlb; struct vm_area_struct *vma = madv_behavior->vma; if (!can_madv_lru_vma(vma)) return -EINVAL; /* * If the VMA belongs to a private file mapping, there can be private * dirty pages which can be paged out if even this process is neither * owner nor write capable of the file. We allow private file mappings * further to pageout dirty anon pages. */ if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) && (vma->vm_flags & VM_MAYSHARE))) return 0; lru_add_drain(); tlb_gather_mmu(&tlb, madv_behavior->mm); madvise_pageout_page_range(&tlb, vma, &madv_behavior->range); tlb_finish_mmu(&tlb); return 0; } static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY; struct mmu_gather *tlb = walk->private; struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *start_pte, *pte, ptent; struct folio *folio; int nr_swap = 0; unsigned long next; int nr, max_nr; next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) return 0; tlb_change_page_size(tlb, PAGE_SIZE); start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!start_pte) return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) { nr = 1; ptent = ptep_get(pte); if (pte_none(ptent)) continue; /* * If the pte has swp_entry, just clear page table to * prevent swap-in which is more expensive rather than * (page allocation + zeroing). 
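 * Contiguous swap entries are batched below (swap_pte_batch()), so the swap slots and their page-table entries are released in one go.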
*/ if (!pte_present(ptent)) { swp_entry_t entry; entry = pte_to_swp_entry(ptent); if (!non_swap_entry(entry)) { max_nr = (end - addr) / PAGE_SIZE; nr = swap_pte_batch(pte, max_nr, ptent); nr_swap -= nr; free_swap_and_cache_nr(entry, nr); clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); } continue; } folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; /* * If we encounter a large folio, only split it if it is not * fully mapped within the range we are operating on. Otherwise * leave it as is so that it can be marked as lazyfree. If we * fail to split a folio, leave it in place and advance to the * next pte in the range. */ if (folio_test_large(folio)) { nr = madvise_folio_pte_batch(addr, end, folio, pte, &ptent); if (nr < folio_nr_pages(folio)) { int err; if (folio_maybe_mapped_shared(folio)) continue; if (!folio_trylock(folio)) continue; folio_get(folio); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); start_pte = NULL; err = split_folio(folio); folio_unlock(folio); folio_put(folio); pte = pte_offset_map_lock(mm, pmd, addr, &ptl); start_pte = pte; if (!start_pte) break; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); if (!err) nr = 0; continue; } } if (folio_test_swapcache(folio) || folio_test_dirty(folio)) { if (!folio_trylock(folio)) continue; /* * If we have a large folio at this point, we know it is * fully mapped so if its mapcount is the same as its * number of pages, it must be exclusive. */ if (folio_mapcount(folio) != folio_nr_pages(folio)) { folio_unlock(folio); continue; } if (folio_test_swapcache(folio) && !folio_free_swap(folio)) { folio_unlock(folio); continue; } folio_clear_dirty(folio); folio_unlock(folio); } if (pte_young(ptent) || pte_dirty(ptent)) { clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags); tlb_remove_tlb_entries(tlb, pte, nr, addr); } folio_mark_lazyfree(folio); } if (nr_swap) add_mm_counter(mm, MM_SWAPENTS, nr_swap); if (start_pte) { arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); } cond_resched(); return 0; } static inline enum page_walk_lock get_walk_lock(enum madvise_lock_mode mode) { switch (mode) { case MADVISE_VMA_READ_LOCK: return PGWALK_VMA_RDLOCK_VERIFY; case MADVISE_MMAP_READ_LOCK: return PGWALK_RDLOCK; default: /* Other modes don't require fixing up the walk_lock */ WARN_ON_ONCE(1); return PGWALK_RDLOCK; } } static int madvise_free_single_vma(struct madvise_behavior *madv_behavior) { struct mm_struct *mm = madv_behavior->mm; struct vm_area_struct *vma = madv_behavior->vma; unsigned long start_addr = madv_behavior->range.start; unsigned long end_addr = madv_behavior->range.end; struct mmu_notifier_range range; struct mmu_gather *tlb = madv_behavior->tlb; struct mm_walk_ops walk_ops = { .pmd_entry = madvise_free_pte_range, }; /* MADV_FREE works for only anon vma at the moment */ if (!vma_is_anonymous(vma)) return -EINVAL; range.start = max(vma->vm_start, start_addr); if (range.start >= vma->vm_end) return -EINVAL; range.end = min(vma->vm_end, end_addr); if (range.end <= vma->vm_start) return -EINVAL; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, range.start, range.end); lru_add_drain(); update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(&range); tlb_start_vma(tlb, vma); walk_ops.walk_lock = get_walk_lock(madv_behavior->lock_mode); walk_page_range_vma(vma, range.start, range.end, &walk_ops, tlb); 
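/* The walk above (madvise_free_pte_range()) has cleared young/dirty bits on the PTEs and marked eligible folios lazyfree; finish the per-VMA TLB batching before tearing down the notifier range. */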
tlb_end_vma(tlb, vma); mmu_notifier_invalidate_range_end(&range); return 0; } /* * Application no longer needs these pages. If the pages are dirty, * it's OK to just throw them away. The app will be more careful about * data it wants to keep. Be sure to free swap resources too. The * zap_page_range_single call sets things up for shrink_active_list to actually * free these pages later if no one else has touched them in the meantime, * although we could add these pages to a global reuse list for * shrink_active_list to pick up before reclaiming other pages. * * NB: This interface discards data rather than pushes it out to swap, * as some implementations do. This has performance implications for * applications like large transactional databases which want to discard * pages in anonymous maps after committing to backing store the data * that was kept in them. There is no reason to write this data out to * the swap area if the application is discarding it. * * An interface that causes the system to free clean pages and flush * dirty pages is already available as msync(MS_INVALIDATE). */ static long madvise_dontneed_single_vma(struct madvise_behavior *madv_behavior) { struct madvise_behavior_range *range = &madv_behavior->range; struct zap_details details = { .reclaim_pt = true, .even_cows = true, }; zap_page_range_single_batched( madv_behavior->tlb, madv_behavior->vma, range->start, range->end - range->start, &details); return 0; } static bool madvise_dontneed_free_valid_vma(struct madvise_behavior *madv_behavior) { struct vm_area_struct *vma = madv_behavior->vma; int behavior = madv_behavior->behavior; struct madvise_behavior_range *range = &madv_behavior->range; if (!is_vm_hugetlb_page(vma)) { unsigned int forbidden = VM_PFNMAP; if (behavior != MADV_DONTNEED_LOCKED) forbidden |= VM_LOCKED; return !(vma->vm_flags & forbidden); } if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED) return false; if (range->start & ~huge_page_mask(hstate_vma(vma))) return false; /* * Madvise callers expect the length to be rounded up to PAGE_SIZE * boundaries, and may be unaware that this VMA uses huge pages. * Avoid unexpected data loss by rounding down the number of * huge pages freed. */ range->end = ALIGN_DOWN(range->end, huge_page_size(hstate_vma(vma))); return true; } static long madvise_dontneed_free(struct madvise_behavior *madv_behavior) { struct mm_struct *mm = madv_behavior->mm; struct madvise_behavior_range *range = &madv_behavior->range; int behavior = madv_behavior->behavior; if (!madvise_dontneed_free_valid_vma(madv_behavior)) return -EINVAL; if (range->start == range->end) return 0; if (!userfaultfd_remove(madv_behavior->vma, range->start, range->end)) { struct vm_area_struct *vma; mark_mmap_lock_dropped(madv_behavior); mmap_read_lock(mm); madv_behavior->vma = vma = vma_lookup(mm, range->start); if (!vma) return -ENOMEM; /* * Potential end adjustment for hugetlb vma is OK as * the check below keeps end within vma. */ if (!madvise_dontneed_free_valid_vma(madv_behavior)) return -EINVAL; if (range->end > vma->vm_end) { /* * Don't fail if end > vma->vm_end. If the old * vma was split while the mmap_lock was * released the effect of the concurrent * operation may not cause madvise() to * have an undefined result. There may be an * adjacent next vma that we'll walk * next. userfaultfd_remove() will generate an * UFFD_EVENT_REMOVE repetition on the * end-vma->vm_end range, but the manager can * handle a repetition fine. 
*/ range->end = vma->vm_end; } /* * If the memory region between start and end was * originally backed by 4kB pages and then remapped to * be backed by hugepages while mmap_lock was dropped, * the adjustment for hugetlb vma above may have rounded * end down to the start address. */ if (range->start == range->end) return 0; VM_WARN_ON(range->start > range->end); } if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED) return madvise_dontneed_single_vma(madv_behavior); else if (behavior == MADV_FREE) return madvise_free_single_vma(madv_behavior); else return -EINVAL; } static long madvise_populate(struct madvise_behavior *madv_behavior) { struct mm_struct *mm = madv_behavior->mm; const bool write = madv_behavior->behavior == MADV_POPULATE_WRITE; int locked = 1; unsigned long start = madv_behavior->range.start; unsigned long end = madv_behavior->range.end; long pages; while (start < end) { /* Populate (prefault) page tables readable/writable. */ pages = faultin_page_range(mm, start, end, write, &locked); if (!locked) { mmap_read_lock(mm); locked = 1; } if (pages < 0) { switch (pages) { case -EINTR: return -EINTR; case -EINVAL: /* Incompatible mappings / permissions. */ return -EINVAL; case -EHWPOISON: return -EHWPOISON; case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */ return -EFAULT; default: pr_warn_once("%s: unhandled return value: %ld\n", __func__, pages); fallthrough; case -ENOMEM: /* No VMA or out of memory. */ return -ENOMEM; } } start += pages * PAGE_SIZE; } return 0; } /* * Application wants to free up the pages and associated backing store. * This is effectively punching a hole into the middle of a file. */ static long madvise_remove(struct madvise_behavior *madv_behavior) { loff_t offset; int error; struct file *f; struct mm_struct *mm = madv_behavior->mm; struct vm_area_struct *vma = madv_behavior->vma; unsigned long start = madv_behavior->range.start; unsigned long end = madv_behavior->range.end; mark_mmap_lock_dropped(madv_behavior); if (vma->vm_flags & VM_LOCKED) return -EINVAL; f = vma->vm_file; if (!f || !f->f_mapping || !f->f_mapping->host) { return -EINVAL; } if (!vma_is_shared_maywrite(vma)) return -EACCES; offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); /* * Filesystem's fallocate may need to take i_rwsem. We need to * explicitly grab a reference because the vma (and hence the * vma's reference to the file) can go away as soon as we drop * mmap_lock. */ get_file(f); if (userfaultfd_remove(vma, start, end)) { /* mmap_lock was not released by userfaultfd_remove() */ mmap_read_unlock(mm); } error = vfs_fallocate(f, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, end - start); fput(f); mmap_read_lock(mm); return error; } static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked) { vm_flags_t disallowed = VM_SPECIAL | VM_HUGETLB; /* * A user could lock after setting a guard range but that's fine, as * they'd not be able to fault in. The issue arises when we try to zap * existing locked VMAs. We don't want to do that. */ if (!allow_locked) disallowed |= VM_LOCKED; return !(vma->vm_flags & disallowed); } static bool is_guard_pte_marker(pte_t ptent) { return is_pte_marker(ptent) && is_guard_swp_entry(pte_to_swp_entry(ptent)); } static int guard_install_pud_entry(pud_t *pud, unsigned long addr, unsigned long next, struct mm_walk *walk) { pud_t pudval = pudp_get(pud); /* If huge return >0 so we abort the operation + zap. 
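 * Guard markers are installed at PTE granularity (see guard_install_set_pte()), so a huge PUD here means populated memory that must be zapped first; the non-zero return makes madvise_guard_install() zap the range and retry.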
*/ return pud_trans_huge(pudval); } static int guard_install_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { pmd_t pmdval = pmdp_get(pmd); /* If huge return >0 so we abort the operation + zap. */ return pmd_trans_huge(pmdval); } static int guard_install_pte_entry(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { pte_t pteval = ptep_get(pte); unsigned long *nr_pages = (unsigned long *)walk->private; /* If there is already a guard page marker, we have nothing to do. */ if (is_guard_pte_marker(pteval)) { (*nr_pages)++; return 0; } /* If populated return >0 so we abort the operation + zap. */ return 1; } static int guard_install_set_pte(unsigned long addr, unsigned long next, pte_t *ptep, struct mm_walk *walk) { unsigned long *nr_pages = (unsigned long *)walk->private; /* Simply install a PTE marker, this causes segfault on access. */ *ptep = make_pte_marker(PTE_MARKER_GUARD); (*nr_pages)++; return 0; } static const struct mm_walk_ops guard_install_walk_ops = { .pud_entry = guard_install_pud_entry, .pmd_entry = guard_install_pmd_entry, .pte_entry = guard_install_pte_entry, .install_pte = guard_install_set_pte, .walk_lock = PGWALK_RDLOCK, }; static long madvise_guard_install(struct madvise_behavior *madv_behavior) { struct vm_area_struct *vma = madv_behavior->vma; struct madvise_behavior_range *range = &madv_behavior->range; long err; int i; if (!is_valid_guard_vma(vma, /* allow_locked = */false)) return -EINVAL; /* * If we install guard markers, then the range is no longer * empty from a page table perspective and therefore it's * appropriate to have an anon_vma. * * This ensures that on fork, we copy page tables correctly. */ err = anon_vma_prepare(vma); if (err) return err; /* * Optimistically try to install the guard marker pages first. If any * non-guard pages are encountered, give up and zap the range before * trying again. * * We try a few times before giving up and releasing back to userland to * loop around, releasing locks in the process to avoid contention. This * would only happen if there was a great many racing page faults. * * In most cases we should simply install the guard markers immediately * with no zap or looping. */ for (i = 0; i < MAX_MADVISE_GUARD_RETRIES; i++) { unsigned long nr_pages = 0; /* Returns < 0 on error, == 0 if success, > 0 if zap needed. */ err = walk_page_range_mm(vma->vm_mm, range->start, range->end, &guard_install_walk_ops, &nr_pages); if (err < 0) return err; if (err == 0) { unsigned long nr_expected_pages = PHYS_PFN(range->end - range->start); VM_WARN_ON(nr_pages != nr_expected_pages); return 0; } /* * OK some of the range have non-guard pages mapped, zap * them. This leaves existing guard pages in place. */ zap_page_range_single(vma, range->start, range->end - range->start, NULL); } /* * We were unable to install the guard pages due to being raced by page * faults. This should not happen ordinarily. We return to userspace and * immediately retry, relieving lock contention. */ return restart_syscall(); } static int guard_remove_pud_entry(pud_t *pud, unsigned long addr, unsigned long next, struct mm_walk *walk) { pud_t pudval = pudp_get(pud); /* If huge, cannot have guard pages present, so no-op - skip. */ if (pud_trans_huge(pudval)) walk->action = ACTION_CONTINUE; return 0; } static int guard_remove_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { pmd_t pmdval = pmdp_get(pmd); /* If huge, cannot have guard pages present, so no-op - skip. 
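 * Guard markers only exist as PTE-level markers, so a huge PMD cannot contain any and the walk need not descend into it.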
*/ if (pmd_trans_huge(pmdval)) walk->action = ACTION_CONTINUE; return 0; } static int guard_remove_pte_entry(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { pte_t ptent = ptep_get(pte); if (is_guard_pte_marker(ptent)) { /* Simply clear the PTE marker. */ pte_clear_not_present_full(walk->mm, addr, pte, false); update_mmu_cache(walk->vma, addr, pte); } return 0; } static const struct mm_walk_ops guard_remove_walk_ops = { .pud_entry = guard_remove_pud_entry, .pmd_entry = guard_remove_pmd_entry, .pte_entry = guard_remove_pte_entry, .walk_lock = PGWALK_RDLOCK, }; static long madvise_guard_remove(struct madvise_behavior *madv_behavior) { struct vm_area_struct *vma = madv_behavior->vma; struct madvise_behavior_range *range = &madv_behavior->range; /* * We're ok with removing guards in mlock()'d ranges, as this is a * non-destructive action. */ if (!is_valid_guard_vma(vma, /* allow_locked = */true)) return -EINVAL; return walk_page_range_vma(vma, range->start, range->end, &guard_remove_walk_ops, NULL); } #ifdef CONFIG_64BIT /* Does the madvise operation result in discarding of mapped data? */ static bool is_discard(int behavior) { switch (behavior) { case MADV_FREE: case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: case MADV_REMOVE: case MADV_DONTFORK: case MADV_WIPEONFORK: case MADV_GUARD_INSTALL: return true; } return false; } /* * We are restricted from madvise()'ing mseal()'d VMAs only in very particular * circumstances - discarding of data from read-only anonymous SEALED mappings. * * This is because users cannot trivially discard data from these VMAs, and may * only do so via an appropriate madvise() call. */ static bool can_madvise_modify(struct madvise_behavior *madv_behavior) { struct vm_area_struct *vma = madv_behavior->vma; /* If the VMA isn't sealed we're good. */ if (!vma_is_sealed(vma)) return true; /* For a sealed VMA, we only care about discard operations. */ if (!is_discard(madv_behavior->behavior)) return true; /* * We explicitly permit all file-backed mappings, whether MAP_SHARED or * MAP_PRIVATE. * * The latter causes some complications. Because now, one can mmap() * read/write a MAP_PRIVATE mapping, write to it, then mprotect() * read-only, mseal() and a discard will be permitted. * * However, in order to avoid issues with potential use of madvise(..., * MADV_DONTNEED) of mseal()'d .text mappings we, for the time being, * permit this. */ if (!vma_is_anonymous(vma)) return true; /* If the user could write to the mapping anyway, then this is fine. */ if ((vma->vm_flags & VM_WRITE) && arch_vma_access_permitted(vma, /* write= */ true, /* execute= */ false, /* foreign= */ false)) return true; /* Otherwise, we are not permitted to perform this operation. */ return false; } #else static bool can_madvise_modify(struct madvise_behavior *madv_behavior) { return true; } #endif /* * Apply an madvise behavior to a region of a vma. madvise_update_vma * will handle splitting a vm area into separate areas, each area with its own * behavior.
*/ static int madvise_vma_behavior(struct madvise_behavior *madv_behavior) { int behavior = madv_behavior->behavior; struct vm_area_struct *vma = madv_behavior->vma; vm_flags_t new_flags = vma->vm_flags; struct madvise_behavior_range *range = &madv_behavior->range; int error; if (unlikely(!can_madvise_modify(madv_behavior))) return -EPERM; switch (behavior) { case MADV_REMOVE: return madvise_remove(madv_behavior); case MADV_WILLNEED: return madvise_willneed(madv_behavior); case MADV_COLD: return madvise_cold(madv_behavior); case MADV_PAGEOUT: return madvise_pageout(madv_behavior); case MADV_FREE: case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: return madvise_dontneed_free(madv_behavior); case MADV_COLLAPSE: return madvise_collapse(vma, range->start, range->end, &madv_behavior->lock_dropped); case MADV_GUARD_INSTALL: return madvise_guard_install(madv_behavior); case MADV_GUARD_REMOVE: return madvise_guard_remove(madv_behavior); /* The below behaviours update VMAs via madvise_update_vma(). */ case MADV_NORMAL: new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; break; case MADV_SEQUENTIAL: new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; break; case MADV_RANDOM: new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; break; case MADV_DONTFORK: new_flags |= VM_DONTCOPY; break; case MADV_DOFORK: if (new_flags & VM_IO) return -EINVAL; new_flags &= ~VM_DONTCOPY; break; case MADV_WIPEONFORK: /* MADV_WIPEONFORK is only supported on anonymous memory. */ if (vma->vm_file || new_flags & VM_SHARED) return -EINVAL; new_flags |= VM_WIPEONFORK; break; case MADV_KEEPONFORK: if (new_flags & VM_DROPPABLE) return -EINVAL; new_flags &= ~VM_WIPEONFORK; break; case MADV_DONTDUMP: new_flags |= VM_DONTDUMP; break; case MADV_DODUMP: if ((!is_vm_hugetlb_page(vma) && (new_flags & VM_SPECIAL)) || (new_flags & VM_DROPPABLE)) return -EINVAL; new_flags &= ~VM_DONTDUMP; break; case MADV_MERGEABLE: case MADV_UNMERGEABLE: error = ksm_madvise(vma, range->start, range->end, behavior, &new_flags); if (error) goto out; break; case MADV_HUGEPAGE: case MADV_NOHUGEPAGE: error = hugepage_madvise(vma, &new_flags, behavior); if (error) goto out; break; case __MADV_SET_ANON_VMA_NAME: /* Only anonymous mappings can be named */ if (vma->vm_file && !vma_is_anon_shmem(vma)) return -EBADF; break; } /* This is a write operation.*/ VM_WARN_ON_ONCE(madv_behavior->lock_mode != MADVISE_MMAP_WRITE_LOCK); error = madvise_update_vma(new_flags, madv_behavior); out: /* * madvise() returns EAGAIN if kernel resources, such as * slab, are temporarily unavailable. */ if (error == -ENOMEM) error = -EAGAIN; return error; } #ifdef CONFIG_MEMORY_FAILURE /* * Error injection support for memory error handling. */ static int madvise_inject_error(struct madvise_behavior *madv_behavior) { unsigned long size; unsigned long start = madv_behavior->range.start; unsigned long end = madv_behavior->range.end; if (!capable(CAP_SYS_ADMIN)) return -EPERM; for (; start < end; start += size) { unsigned long pfn; struct page *page; int ret; ret = get_user_pages_fast(start, 1, 0, &page); if (ret != 1) return ret; pfn = page_to_pfn(page); /* * When soft offlining hugepages, after migrating the page * we dissolve it, therefore in the second loop "page" will * no longer be a compound page. 
*/ size = page_size(compound_head(page)); if (madv_behavior->behavior == MADV_SOFT_OFFLINE) { pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n", pfn, start); ret = soft_offline_page(pfn, MF_COUNT_INCREASED); } else { pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n", pfn, start); ret = memory_failure(pfn, MF_ACTION_REQUIRED | MF_COUNT_INCREASED | MF_SW_SIMULATED); if (ret == -EOPNOTSUPP) ret = 0; } if (ret) return ret; } return 0; } static bool is_memory_failure(struct madvise_behavior *madv_behavior) { switch (madv_behavior->behavior) { case MADV_HWPOISON: case MADV_SOFT_OFFLINE: return true; default: return false; } } #else static int madvise_inject_error(struct madvise_behavior *madv_behavior) { return 0; } static bool is_memory_failure(struct madvise_behavior *madv_behavior) { return false; } #endif /* CONFIG_MEMORY_FAILURE */ static bool madvise_behavior_valid(int behavior) { switch (behavior) { case MADV_DOFORK: case MADV_DONTFORK: case MADV_NORMAL: case MADV_SEQUENTIAL: case MADV_RANDOM: case MADV_REMOVE: case MADV_WILLNEED: case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: case MADV_FREE: case MADV_COLD: case MADV_PAGEOUT: case MADV_POPULATE_READ: case MADV_POPULATE_WRITE: #ifdef CONFIG_KSM case MADV_MERGEABLE: case MADV_UNMERGEABLE: #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE case MADV_HUGEPAGE: case MADV_NOHUGEPAGE: case MADV_COLLAPSE: #endif case MADV_DONTDUMP: case MADV_DODUMP: case MADV_WIPEONFORK: case MADV_KEEPONFORK: case MADV_GUARD_INSTALL: case MADV_GUARD_REMOVE: #ifdef CONFIG_MEMORY_FAILURE case MADV_SOFT_OFFLINE: case MADV_HWPOISON: #endif return true; default: return false; } } /* Can we invoke process_madvise() on a remote mm for the specified behavior? */ static bool process_madvise_remote_valid(int behavior) { switch (behavior) { case MADV_COLD: case MADV_PAGEOUT: case MADV_WILLNEED: case MADV_COLLAPSE: return true; default: return false; } } /* * Try to acquire a VMA read lock if possible. * * We only support this lock over a single VMA, which the input range must * span either partially or fully. * * This function always returns with an appropriate lock held. If a VMA read * lock could be acquired, we return true and set madv_behavior state * accordingly. * * If a VMA read lock could not be acquired, we return false and expect caller to * fallback to mmap lock behaviour. */ static bool try_vma_read_lock(struct madvise_behavior *madv_behavior) { struct mm_struct *mm = madv_behavior->mm; struct vm_area_struct *vma; vma = lock_vma_under_rcu(mm, madv_behavior->range.start); if (!vma) goto take_mmap_read_lock; /* * Must span only a single VMA; uffd and remote processes are * unsupported. */ if (madv_behavior->range.end > vma->vm_end || current->mm != mm || userfaultfd_armed(vma)) { vma_end_read(vma); goto take_mmap_read_lock; } madv_behavior->vma = vma; return true; take_mmap_read_lock: mmap_read_lock(mm); madv_behavior->lock_mode = MADVISE_MMAP_READ_LOCK; return false; } /* * Walk the vmas in range [start,end), and call the madvise_vma_behavior * function on each one. The function will get start and end parameters that * cover the overlap between the current vma and the original range. Any * unmapped regions in the original range will result in this function returning * -ENOMEM while still calling the madvise_vma_behavior function on all of the * existing vmas in the range. Must be called with the mmap_lock held for * reading or writing. 
*/ static int madvise_walk_vmas(struct madvise_behavior *madv_behavior) { struct mm_struct *mm = madv_behavior->mm; struct madvise_behavior_range *range = &madv_behavior->range; /* range is updated to span each VMA, so store end of entire range. */ unsigned long last_end = range->end; int unmapped_error = 0; int error; struct vm_area_struct *prev, *vma; /* * If VMA read lock is supported, apply madvise to a single VMA * tentatively, avoiding walking VMAs. */ if (madv_behavior->lock_mode == MADVISE_VMA_READ_LOCK && try_vma_read_lock(madv_behavior)) { error = madvise_vma_behavior(madv_behavior); vma_end_read(madv_behavior->vma); return error; } vma = find_vma_prev(mm, range->start, &prev); if (vma && range->start > vma->vm_start) prev = vma; for (;;) { /* Still start < end. */ if (!vma) return -ENOMEM; /* Here start < (last_end|vma->vm_end). */ if (range->start < vma->vm_start) { /* * This indicates a gap between VMAs in the input * range. This does not cause the operation to abort, * rather we simply return -ENOMEM to indicate that this * has happened, but carry on. */ unmapped_error = -ENOMEM; range->start = vma->vm_start; if (range->start >= last_end) break; } /* Here vma->vm_start <= range->start < (last_end|vma->vm_end) */ range->end = min(vma->vm_end, last_end); /* Here vma->vm_start <= range->start < range->end <= (last_end|vma->vm_end). */ madv_behavior->prev = prev; madv_behavior->vma = vma; error = madvise_vma_behavior(madv_behavior); if (error) return error; if (madv_behavior->lock_dropped) { /* We dropped the mmap lock, we can't ref the VMA. */ prev = NULL; vma = NULL; madv_behavior->lock_dropped = false; } else { vma = madv_behavior->vma; prev = vma; } if (vma && range->end < vma->vm_end) range->end = vma->vm_end; if (range->end >= last_end) break; vma = find_vma(mm, vma ? vma->vm_end : range->end); range->start = range->end; } return unmapped_error; } /* * Any behaviour which results in changes to the vma->vm_flags needs to * take mmap_lock for writing. Others, which simply traverse vmas, need * to only take it for reading. */ static enum madvise_lock_mode get_lock_mode(struct madvise_behavior *madv_behavior) { if (is_memory_failure(madv_behavior)) return MADVISE_NO_LOCK; switch (madv_behavior->behavior) { case MADV_REMOVE: case MADV_WILLNEED: case MADV_COLD: case MADV_PAGEOUT: case MADV_POPULATE_READ: case MADV_POPULATE_WRITE: case MADV_COLLAPSE: case MADV_GUARD_INSTALL: case MADV_GUARD_REMOVE: return MADVISE_MMAP_READ_LOCK; case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: case MADV_FREE: return MADVISE_VMA_READ_LOCK; default: return MADVISE_MMAP_WRITE_LOCK; } } static int madvise_lock(struct madvise_behavior *madv_behavior) { struct mm_struct *mm = madv_behavior->mm; enum madvise_lock_mode lock_mode = get_lock_mode(madv_behavior); switch (lock_mode) { case MADVISE_NO_LOCK: break; case MADVISE_MMAP_WRITE_LOCK: if (mmap_write_lock_killable(mm)) return -EINTR; break; case MADVISE_MMAP_READ_LOCK: mmap_read_lock(mm); break; case MADVISE_VMA_READ_LOCK: /* We will acquire the lock per-VMA in madvise_walk_vmas(). */ break; } madv_behavior->lock_mode = lock_mode; return 0; } static void madvise_unlock(struct madvise_behavior *madv_behavior) { struct mm_struct *mm = madv_behavior->mm; switch (madv_behavior->lock_mode) { case MADVISE_NO_LOCK: return; case MADVISE_MMAP_WRITE_LOCK: mmap_write_unlock(mm); break; case MADVISE_MMAP_READ_LOCK: mmap_read_unlock(mm); break; case MADVISE_VMA_READ_LOCK: /* We will drop the lock per-VMA in madvise_walk_vmas(). 
*/ break; } madv_behavior->lock_mode = MADVISE_NO_LOCK; } static bool madvise_batch_tlb_flush(int behavior) { switch (behavior) { case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: case MADV_FREE: return true; default: return false; } } static void madvise_init_tlb(struct madvise_behavior *madv_behavior) { if (madvise_batch_tlb_flush(madv_behavior->behavior)) tlb_gather_mmu(madv_behavior->tlb, madv_behavior->mm); } static void madvise_finish_tlb(struct madvise_behavior *madv_behavior) { if (madvise_batch_tlb_flush(madv_behavior->behavior)) tlb_finish_mmu(madv_behavior->tlb); } static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior) { size_t len; if (!madvise_behavior_valid(behavior)) return false; if (!PAGE_ALIGNED(start)) return false; len = PAGE_ALIGN(len_in); /* Check to see whether len was rounded up from small -ve to zero */ if (len_in && !len) return false; if (start + len < start) return false; return true; } /* * madvise_should_skip() - Return true if the request is invalid or would do nothing. * @start: Start address of madvise-requested address range. * @len_in: Length of madvise-requested address range. * @behavior: Requested madvise behavior. * @err: Pointer to store an error code from the check. * * If the specified behaviour is invalid or nothing would occur, we skip the * operation. This function returns true in those cases, otherwise false. In * the former case we store an error in @err. */ static bool madvise_should_skip(unsigned long start, size_t len_in, int behavior, int *err) { if (!is_valid_madvise(start, len_in, behavior)) { *err = -EINVAL; return true; } if (start + PAGE_ALIGN(len_in) == start) { *err = 0; return true; } return false; } static bool is_madvise_populate(struct madvise_behavior *madv_behavior) { switch (madv_behavior->behavior) { case MADV_POPULATE_READ: case MADV_POPULATE_WRITE: return true; default: return false; } } /* * untagged_addr_remote() assumes mmap_lock is already held. On * architectures like x86 and RISC-V, tagging is tricky because each * mm may have a different tagging mask. However, we might only hold * the per-VMA lock (currently only local processes are supported), * so untagged_addr is used to avoid the mmap_lock assertion for * local processes. */ static inline unsigned long get_untagged_addr(struct mm_struct *mm, unsigned long start) { return current->mm == mm ? untagged_addr(start) : untagged_addr_remote(mm, start); } static int madvise_do_behavior(unsigned long start, size_t len_in, struct madvise_behavior *madv_behavior) { struct blk_plug plug; int error; struct madvise_behavior_range *range = &madv_behavior->range; if (is_memory_failure(madv_behavior)) { range->start = start; range->end = start + len_in; return madvise_inject_error(madv_behavior); } range->start = get_untagged_addr(madv_behavior->mm, start); range->end = range->start + PAGE_ALIGN(len_in); blk_start_plug(&plug); if (is_madvise_populate(madv_behavior)) error = madvise_populate(madv_behavior); else error = madvise_walk_vmas(madv_behavior); blk_finish_plug(&plug); return error; } /* * The madvise(2) system call. * * Applications can use madvise() to advise the kernel how it should * handle paging I/O in this VM area. The idea is to help the kernel * use appropriate read-ahead and caching techniques. The information * provided is advisory only, and can be safely disregarded by the * kernel without affecting the correct operation of the application. * * behavior values: * MADV_NORMAL - the default behavior is to read clusters.
This * results in some read-ahead and read-behind. * MADV_RANDOM - the system should read the minimum amount of data * on any access, since it is unlikely that the appli- * cation will need more than what it asks for. * MADV_SEQUENTIAL - pages in the given range will probably be accessed * once, so they can be aggressively read ahead, and * can be freed soon after they are accessed. * MADV_WILLNEED - the application is notifying the system to read * some pages ahead. * MADV_DONTNEED - the application is finished with the given range, * so the kernel can free resources associated with it. * MADV_FREE - the application marks pages in the given range as lazy free, * where actual purges are postponed until memory pressure happens. * MADV_REMOVE - the application wants to free up the given range of * pages and associated backing store. * MADV_DONTFORK - omit this area from child's address space when forking: * typically, to avoid COWing pages pinned by get_user_pages(). * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. * MADV_WIPEONFORK - present the child process with zero-filled memory in this * range after a fork. * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK * MADV_HWPOISON - trigger memory error handler as if the given memory range * were corrupted by unrecoverable hardware memory failure. * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. * MADV_MERGEABLE - the application recommends that KSM try to merge pages in * this area with pages of identical content from other such areas. * MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others. * MADV_HUGEPAGE - the application wants to back the given range by transparent * huge pages in the future. Existing pages might be coalesced and * new pages might be allocated as THP. * MADV_NOHUGEPAGE - mark the given range as not worth being backed by * transparent huge pages so the existing pages will not be * coalesced into THP and new pages will not be allocated as THP. * MADV_COLLAPSE - synchronously coalesce pages into new THP. * MADV_DONTDUMP - the application wants to prevent pages in the given range * from being included in its core dump. * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump. * MADV_COLD - the application is not expected to use this memory soon, * deactivate pages in this range so that they can be reclaimed * easily if memory pressure happens. * MADV_PAGEOUT - the application is not expected to use this memory soon, * page out the pages in this range immediately. * MADV_POPULATE_READ - populate (prefault) page tables readable by * triggering read faults if required * MADV_POPULATE_WRITE - populate (prefault) page tables writable by * triggering write faults if required * * return values: * zero - success * -EINVAL - start + len < 0, start is not page-aligned, * "behavior" is not a valid value, or application * is attempting to release locked or shared pages, * or the specified address range includes file, Huge TLB, * MAP_SHARED or VM_PFNMAP range. * -ENOMEM - addresses in the specified range are not currently * mapped, or are outside the AS of the process. * -EIO - an I/O error occurred while paging in data. * -EBADF - map exists, but area maps something that isn't a file. * -EAGAIN - a kernel resource was temporarily unavailable. * -EPERM - memory is sealed.
*/ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior) { int error; struct mmu_gather tlb; struct madvise_behavior madv_behavior = { .mm = mm, .behavior = behavior, .tlb = &tlb, }; if (madvise_should_skip(start, len_in, behavior, &error)) return error; error = madvise_lock(&madv_behavior); if (error) return error; madvise_init_tlb(&madv_behavior); error = madvise_do_behavior(start, len_in, &madv_behavior); madvise_finish_tlb(&madv_behavior); madvise_unlock(&madv_behavior); return error; } SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) { return do_madvise(current->mm, start, len_in, behavior); } /* Perform an madvise operation over a vector of addresses and lengths. */ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter, int behavior) { ssize_t ret = 0; size_t total_len; struct mmu_gather tlb; struct madvise_behavior madv_behavior = { .mm = mm, .behavior = behavior, .tlb = &tlb, }; total_len = iov_iter_count(iter); ret = madvise_lock(&madv_behavior); if (ret) return ret; madvise_init_tlb(&madv_behavior); while (iov_iter_count(iter)) { unsigned long start = (unsigned long)iter_iov_addr(iter); size_t len_in = iter_iov_len(iter); int error; if (madvise_should_skip(start, len_in, behavior, &error)) ret = error; else ret = madvise_do_behavior(start, len_in, &madv_behavior); /* * An madvise operation is attempting to restart the syscall, * but we cannot proceed as it would not be correct to repeat * the operation in aggregate, and would be surprising to the * user. * * We drop and reacquire locks so it is safe to just loop and * try again. We check for fatal signals in case we need to exit * early anyway. */ if (ret == -ERESTARTNOINTR) { if (fatal_signal_pending(current)) { ret = -EINTR; break; } /* Drop and reacquire lock to unwind race. */ madvise_finish_tlb(&madv_behavior); madvise_unlock(&madv_behavior); ret = madvise_lock(&madv_behavior); if (ret) goto out; madvise_init_tlb(&madv_behavior); continue; } if (ret < 0) break; iov_iter_advance(iter, iter_iov_len(iter)); } madvise_finish_tlb(&madv_behavior); madvise_unlock(&madv_behavior); out: ret = (total_len - iov_iter_count(iter)) ? : ret; return ret; } SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, size_t, vlen, int, behavior, unsigned int, flags) { ssize_t ret; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; struct task_struct *task; struct mm_struct *mm; unsigned int f_flags; if (flags != 0) { ret = -EINVAL; goto out; } ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); if (ret < 0) goto out; task = pidfd_get_task(pidfd, &f_flags); if (IS_ERR(task)) { ret = PTR_ERR(task); goto free_iov; } /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); if (IS_ERR(mm)) { ret = PTR_ERR(mm); goto release_task; } /* * We need only perform this check if we are attempting to manipulate a * remote process's address space. */ if (mm != current->mm && !process_madvise_remote_valid(behavior)) { ret = -EINVAL; goto release_mm; } /* * Require CAP_SYS_NICE for influencing process performance. Note that * only non-destructive hints are currently supported for remote * processes.
*/ if (mm != current->mm && !capable(CAP_SYS_NICE)) { ret = -EPERM; goto release_mm; } ret = vector_madvise(mm, &iter, behavior); release_mm: mmput(mm); release_task: put_task_struct(task); free_iov: kfree(iov); out: return ret; } #ifdef CONFIG_ANON_VMA_NAME #define ANON_VMA_NAME_MAX_LEN 80 #define ANON_VMA_NAME_INVALID_CHARS "\\`$[]" static inline bool is_valid_name_char(char ch) { /* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */ return ch > 0x1f && ch < 0x7f && !strchr(ANON_VMA_NAME_INVALID_CHARS, ch); } static int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in, struct anon_vma_name *anon_name) { unsigned long end; unsigned long len; int error; struct madvise_behavior madv_behavior = { .mm = mm, .behavior = __MADV_SET_ANON_VMA_NAME, .anon_name = anon_name, }; if (start & ~PAGE_MASK) return -EINVAL; len = (len_in + ~PAGE_MASK) & PAGE_MASK; /* Check to see whether len was rounded up from small -ve to zero */ if (len_in && !len) return -EINVAL; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; madv_behavior.range.start = start; madv_behavior.range.end = end; error = madvise_lock(&madv_behavior); if (error) return error; error = madvise_walk_vmas(&madv_behavior); madvise_unlock(&madv_behavior); return error; } int set_anon_vma_name(unsigned long addr, unsigned long size, const char __user *uname) { struct anon_vma_name *anon_name = NULL; struct mm_struct *mm = current->mm; int error; if (uname) { char *name, *pch; name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN); if (IS_ERR(name)) return PTR_ERR(name); for (pch = name; *pch != '\0'; pch++) { if (!is_valid_name_char(*pch)) { kfree(name); return -EINVAL; } } /* anon_vma has its own copy */ anon_name = anon_vma_name_alloc(name); kfree(name); if (!anon_name) return -ENOMEM; } error = madvise_set_anon_name(mm, addr, size, anon_name); anon_vma_name_put(anon_name); return error; } #endif
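/*
 * Editorial illustration, not part of the kernel sources above: a minimal
 * userspace sketch of how the madvise(2) entry point implemented by
 * do_madvise() is typically reached. The mapping size and the MADV_DONTNEED
 * advice are arbitrary choices for the example; error handling is minimal.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	const size_t len = 16 * 4096;	/* example size; any page-aligned length works */
	char *p;

	/* Anonymous private mapping to advise on. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;	/* fault in at least one page */

	/* Tell the kernel the contents of the range are no longer needed. */
	if (madvise(p, len, MADV_DONTNEED))
		perror("madvise");

	munmap(p, len);
	return 0;
}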
// SPDX-License-Identifier: GPL-2.0-or-later /* * imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD * * Copyright(C) 2010 Jarod Wilson <jarod@wilsonet.com> * Portions based on the original lirc_imon driver, * Copyright(C) 2004 Venky Raju(dev@venky.ws) * * Huge thanks to R. Geoff Newbury for invaluable debugging on the * 0xffdc iMON devices, and for sending me one to hack on, without * which the support for them wouldn't be nearly as good. Thanks * also to the numerous 0xffdc device owners that tested auto-config * support for me and provided debug dumps from their devices.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/ratelimit.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> #include <linux/timer.h> #define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>" #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" #define MOD_NAME "imon" #define MOD_VERSION "0.9.4" #define DISPLAY_MINOR_BASE 144 #define DEVICE_NAME "lcd%d" #define IMON_CLOCK_ENABLE_PACKETS 2 /*** P R O T O T Y P E S ***/ /* USB Callback prototypes */ static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id); static void imon_disconnect(struct usb_interface *interface); static void usb_rx_callback_intf0(struct urb *urb); static void usb_rx_callback_intf1(struct urb *urb); static void usb_tx_callback(struct urb *urb); /* suspend/resume support */ static int imon_resume(struct usb_interface *intf); static int imon_suspend(struct usb_interface *intf, pm_message_t message); /* Display file_operations function prototypes */ static int display_open(struct inode *inode, struct file *file); static int display_close(struct inode *inode, struct file *file); /* VFD write operation */ static ssize_t vfd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos); /* LCD file_operations override function prototypes */ static ssize_t lcd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos); /*** G L O B A L S ***/ struct imon_panel_key_table { u64 hw_code; u32 keycode; }; struct imon_usb_dev_descr { __u16 flags; #define IMON_NO_FLAGS 0 #define IMON_NEED_20MS_PKT_DELAY 1 #define IMON_SUPPRESS_REPEATED_KEYS 2 struct imon_panel_key_table key_table[]; }; struct imon_context { struct device *dev; /* Newer devices have two interfaces */ struct usb_device *usbdev_intf0; struct usb_device *usbdev_intf1; bool display_supported; /* not all controllers do */ bool display_isopen; /* display port has been opened */ bool rf_device; /* true if iMON 2.4G LT/DT RF device */ bool rf_isassociating; /* RF remote associating */ bool dev_present_intf0; /* USB device presence, interface 0 */ bool dev_present_intf1; /* USB device presence, interface 1 */ struct mutex lock; /* to lock this object */ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ struct usb_endpoint_descriptor *rx_endpoint_intf0; struct usb_endpoint_descriptor *rx_endpoint_intf1; struct usb_endpoint_descriptor *tx_endpoint; struct urb *rx_urb_intf0; struct urb *rx_urb_intf1; struct urb *tx_urb; bool tx_control; unsigned char usb_rx_buf[8]; unsigned char usb_tx_buf[8]; unsigned int send_packet_delay; struct tx_t { unsigned char data_buf[35]; /* user data buffer */ struct completion finished; /* wait for write to finish */ bool busy; /* write in progress */ int status; /* status of tx completion */ } tx; u16 vendor; /* usb vendor ID */ u16 product; /* usb product ID */ struct rc_dev *rdev; /* rc-core device for remote */ struct input_dev *idev; /* input device for panel & IR mouse */ struct input_dev *touch; /* input device for touchscreen */ spinlock_t kc_lock; /* make sure we get keycodes right */ u32 kc; /* current input keycode */ u32 last_keycode; /* last reported input keycode */ u32 rc_scancode; /* the computed remote scancode */ u8 rc_toggle; /* the computed remote toggle bit */ u64 rc_proto; /* iMON or 
MCE (RC6) IR protocol? */ bool release_code; /* some keys send a release code */ u8 display_type; /* store the display type */ bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */ char name_rdev[128]; /* rc input device name */ char phys_rdev[64]; /* rc input device phys path */ char name_idev[128]; /* input device name */ char phys_idev[64]; /* input device phys path */ char name_touch[128]; /* touch screen name */ char phys_touch[64]; /* touch screen phys path */ struct timer_list ttimer; /* touch screen timer */ int touch_x; /* x coordinate on touchscreen */ int touch_y; /* y coordinate on touchscreen */ const struct imon_usb_dev_descr *dev_descr; /* device description with key */ /* table for front panels */ /* * Fields for deferring free_imon_context(). * * Since reference to "struct imon_context" is stored into * "struct file"->private_data, we need to remember * how many file descriptors might access this "struct imon_context". */ refcount_t users; /* * Use a flag for telling display_open()/vfd_write()/lcd_write() that * imon_disconnect() was already called. */ bool disconnected; /* * We need to wait for RCU grace period in order to allow * display_open() to safely check ->disconnected and increment ->users. */ struct rcu_head rcu; }; #define TOUCH_TIMEOUT (HZ/30) /* vfd character device file operations */ static const struct file_operations vfd_fops = { .owner = THIS_MODULE, .open = display_open, .write = vfd_write, .release = display_close, .llseek = noop_llseek, }; /* lcd character device file operations */ static const struct file_operations lcd_fops = { .owner = THIS_MODULE, .open = display_open, .write = lcd_write, .release = display_close, .llseek = noop_llseek, }; enum { IMON_DISPLAY_TYPE_AUTO = 0, IMON_DISPLAY_TYPE_VFD = 1, IMON_DISPLAY_TYPE_LCD = 2, IMON_DISPLAY_TYPE_VGA = 3, IMON_DISPLAY_TYPE_NONE = 4, }; enum { IMON_KEY_IMON = 0, IMON_KEY_MCE = 1, IMON_KEY_PANEL = 2, }; static struct usb_class_driver imon_vfd_class = { .name = DEVICE_NAME, .fops = &vfd_fops, .minor_base = DISPLAY_MINOR_BASE, }; static struct usb_class_driver imon_lcd_class = { .name = DEVICE_NAME, .fops = &lcd_fops, .minor_base = DISPLAY_MINOR_BASE, }; /* imon receiver front panel/knob key table */ static const struct imon_usb_dev_descr imon_default_table = { .flags = IMON_NO_FLAGS, .key_table = { { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */ { 0x000000001200ffeell, KEY_UP }, { 0x000000001300ffeell, KEY_DOWN }, { 0x000000001400ffeell, KEY_LEFT }, { 0x000000001500ffeell, KEY_RIGHT }, { 0x000000001600ffeell, KEY_ENTER }, { 0x000000001700ffeell, KEY_ESC }, { 0x000000001f00ffeell, KEY_AUDIO }, { 0x000000002000ffeell, KEY_VIDEO }, { 0x000000002100ffeell, KEY_CAMERA }, { 0x000000002700ffeell, KEY_DVD }, { 0x000000002300ffeell, KEY_TV }, { 0x000000002b00ffeell, KEY_EXIT }, { 0x000000002c00ffeell, KEY_SELECT }, { 0x000000002d00ffeell, KEY_MENU }, { 0x000000000500ffeell, KEY_PREVIOUS }, { 0x000000000700ffeell, KEY_REWIND }, { 0x000000000400ffeell, KEY_STOP }, { 0x000000003c00ffeell, KEY_PLAYPAUSE }, { 0x000000000800ffeell, KEY_FASTFORWARD }, { 0x000000000600ffeell, KEY_NEXT }, { 0x000000010000ffeell, KEY_RIGHT }, { 0x000001000000ffeell, KEY_LEFT }, { 0x000000003d00ffeell, KEY_SELECT }, { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000000100ffeell, KEY_MUTE }, /* 0xffdc iMON MCE VFD */ { 0x00010000ffffffeell, KEY_VOLUMEUP }, { 0x01000000ffffffeell, KEY_VOLUMEDOWN }, { 0x00000001ffffffeell, KEY_MUTE }, { 0x0000000fffffffeell, KEY_MEDIA }, { 0x00000012ffffffeell, KEY_UP }, { 
0x00000013ffffffeell, KEY_DOWN }, { 0x00000014ffffffeell, KEY_LEFT }, { 0x00000015ffffffeell, KEY_RIGHT }, { 0x00000016ffffffeell, KEY_ENTER }, { 0x00000017ffffffeell, KEY_ESC }, /* iMON Knob values */ { 0x000100ffffffffeell, KEY_VOLUMEUP }, { 0x010000ffffffffeell, KEY_VOLUMEDOWN }, { 0x000008ffffffffeell, KEY_MUTE }, { 0, KEY_RESERVED }, } }; static const struct imon_usb_dev_descr imon_OEM_VFD = { .flags = IMON_NEED_20MS_PKT_DELAY, .key_table = { { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */ { 0x000000001200ffeell, KEY_UP }, { 0x000000001300ffeell, KEY_DOWN }, { 0x000000001400ffeell, KEY_LEFT }, { 0x000000001500ffeell, KEY_RIGHT }, { 0x000000001600ffeell, KEY_ENTER }, { 0x000000001700ffeell, KEY_ESC }, { 0x000000001f00ffeell, KEY_AUDIO }, { 0x000000002b00ffeell, KEY_EXIT }, { 0x000000002c00ffeell, KEY_SELECT }, { 0x000000002d00ffeell, KEY_MENU }, { 0x000000000500ffeell, KEY_PREVIOUS }, { 0x000000000700ffeell, KEY_REWIND }, { 0x000000000400ffeell, KEY_STOP }, { 0x000000003c00ffeell, KEY_PLAYPAUSE }, { 0x000000000800ffeell, KEY_FASTFORWARD }, { 0x000000000600ffeell, KEY_NEXT }, { 0x000000010000ffeell, KEY_RIGHT }, { 0x000001000000ffeell, KEY_LEFT }, { 0x000000003d00ffeell, KEY_SELECT }, { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000000100ffeell, KEY_MUTE }, /* 0xffdc iMON MCE VFD */ { 0x00010000ffffffeell, KEY_VOLUMEUP }, { 0x01000000ffffffeell, KEY_VOLUMEDOWN }, { 0x00000001ffffffeell, KEY_MUTE }, { 0x0000000fffffffeell, KEY_MEDIA }, { 0x00000012ffffffeell, KEY_UP }, { 0x00000013ffffffeell, KEY_DOWN }, { 0x00000014ffffffeell, KEY_LEFT }, { 0x00000015ffffffeell, KEY_RIGHT }, { 0x00000016ffffffeell, KEY_ENTER }, { 0x00000017ffffffeell, KEY_ESC }, /* iMON Knob values */ { 0x000100ffffffffeell, KEY_VOLUMEUP }, { 0x010000ffffffffeell, KEY_VOLUMEDOWN }, { 0x000008ffffffffeell, KEY_MUTE }, { 0, KEY_RESERVED }, } }; /* imon receiver front panel/knob key table for DH102*/ static const struct imon_usb_dev_descr imon_DH102 = { .flags = IMON_NO_FLAGS, .key_table = { { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000010000ffeell, KEY_MUTE }, { 0x0000000f0000ffeell, KEY_MEDIA }, { 0x000000120000ffeell, KEY_UP }, { 0x000000130000ffeell, KEY_DOWN }, { 0x000000140000ffeell, KEY_LEFT }, { 0x000000150000ffeell, KEY_RIGHT }, { 0x000000160000ffeell, KEY_ENTER }, { 0x000000170000ffeell, KEY_ESC }, { 0x0000002b0000ffeell, KEY_EXIT }, { 0x0000002c0000ffeell, KEY_SELECT }, { 0x0000002d0000ffeell, KEY_MENU }, { 0, KEY_RESERVED } } }; /* imon ultrabay front panel key table */ static const struct imon_usb_dev_descr ultrabay_table = { .flags = IMON_SUPPRESS_REPEATED_KEYS, .key_table = { { 0x0000000f0000ffeell, KEY_MEDIA }, /* Go */ { 0x000000000100ffeell, KEY_UP }, { 0x000000000001ffeell, KEY_DOWN }, { 0x000000160000ffeell, KEY_ENTER }, { 0x0000001f0000ffeell, KEY_AUDIO }, /* Music */ { 0x000000200000ffeell, KEY_VIDEO }, /* Movie */ { 0x000000210000ffeell, KEY_CAMERA }, /* Photo */ { 0x000000270000ffeell, KEY_DVD }, /* DVD */ { 0x000000230000ffeell, KEY_TV }, /* TV */ { 0x000000050000ffeell, KEY_PREVIOUS }, /* Previous */ { 0x000000070000ffeell, KEY_REWIND }, { 0x000000040000ffeell, KEY_STOP }, { 0x000000020000ffeell, KEY_PLAYPAUSE }, { 0x000000080000ffeell, KEY_FASTFORWARD }, { 0x000000060000ffeell, KEY_NEXT }, /* Next */ { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000010000ffeell, KEY_MUTE }, { 0, KEY_RESERVED }, } }; /* * USB Device ID for iMON USB Control Boards * * The Windows 
drivers contain 6 different inf files, more or less one for * each new device until the 0x0034-0x0046 devices, which all use the same * driver. Some of the devices in the 34-46 range haven't been definitively * identified yet. Early devices have either a TriGem Computer, Inc. or a * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later * devices use the SoundGraph vendor ID (0x15c2). This driver only supports * the ffdc and later devices, which do onboard decoding. */ static const struct usb_device_id imon_usb_id_table[] = { /* * Several devices with this same device ID, all use iMON_PAD.inf * SoundGraph iMON PAD (IR & VFD) * SoundGraph iMON PAD (IR & LCD) * SoundGraph iMON Knob (IR only) */ { USB_DEVICE(0x15c2, 0xffdc), .driver_info = (unsigned long)&imon_default_table }, /* * Newer devices, all driven by the latest iMON Windows driver, full * list of device IDs extracted via 'strings Setup/data1.hdr |grep 15c2' * Need user input to fill in details on unknown devices. */ /* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */ { USB_DEVICE(0x15c2, 0x0034), .driver_info = (unsigned long)&imon_DH102 }, /* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */ { USB_DEVICE(0x15c2, 0x0035), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM VFD (IR & VFD) */ { USB_DEVICE(0x15c2, 0x0036), .driver_info = (unsigned long)&imon_OEM_VFD }, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0037), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM LCD (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0038), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON UltraBay (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0039), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003a), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003b), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM Inside (IR only) */ { USB_DEVICE(0x15c2, 0x003c), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003d), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003e), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003f), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0040), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON MINI (IR only) */ { USB_DEVICE(0x15c2, 0x0041), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station EZ External (IR only) */ { USB_DEVICE(0x15c2, 0x0042), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Basic Internal (IR only) */ { USB_DEVICE(0x15c2, 0x0043), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Elite (IR & VFD) */ { USB_DEVICE(0x15c2, 0x0044), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Premiere (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0045), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0046), .driver_info = (unsigned long)&imon_default_table}, {} }; /* USB Device data */ static struct usb_driver imon_driver = { .name = MOD_NAME, .probe = imon_probe, .disconnect = imon_disconnect, .suspend = imon_suspend, .resume = imon_resume, .id_table = 
imon_usb_id_table, }; /* Module bookkeeping bits */ MODULE_AUTHOR(MOD_AUTHOR); MODULE_DESCRIPTION(MOD_DESC); MODULE_VERSION(MOD_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(usb, imon_usb_id_table); static bool debug; module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)"); /* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */ static int display_type; module_param(display_type, int, S_IRUGO); MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, 1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)"); static int pad_stabilize = 1; module_param(pad_stabilize, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD presses in arrow key mode. 0=disable, 1=enable (default)."); /* * In certain use cases, mouse mode isn't really helpful, and could actually * cause confusion, so allow disabling it when the IR device is open. */ static bool nomouse; module_param(nomouse, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is open. 0=don't disable, 1=disable. (default: don't disable)"); /* threshold at which a pad push registers as an arrow key in kbd mode */ static int pad_thresh; module_param(pad_thresh, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an arrow key in kbd mode (default: 28)"); static void free_imon_context(struct imon_context *ictx) { struct device *dev = ictx->dev; usb_free_urb(ictx->tx_urb); WARN_ON(ictx->dev_present_intf0); usb_free_urb(ictx->rx_urb_intf0); WARN_ON(ictx->dev_present_intf1); usb_free_urb(ictx->rx_urb_intf1); kfree_rcu(ictx, rcu); dev_dbg(dev, "%s: iMON context freed\n", __func__); } /* * Called when the Display device (e.g. /dev/lcd0) * is opened by the application. */ static int display_open(struct inode *inode, struct file *file) { struct usb_interface *interface; struct imon_context *ictx = NULL; int subminor; int retval = 0; subminor = iminor(inode); interface = usb_find_interface(&imon_driver, subminor); if (!interface) { pr_err("could not find interface for minor %d\n", subminor); retval = -ENODEV; goto exit; } rcu_read_lock(); ictx = usb_get_intfdata(interface); if (!ictx || ictx->disconnected || !refcount_inc_not_zero(&ictx->users)) { rcu_read_unlock(); pr_err("no context found for minor %d\n", subminor); retval = -ENODEV; goto exit; } rcu_read_unlock(); mutex_lock(&ictx->lock); if (!ictx->display_supported) { pr_err("display not supported by device\n"); retval = -ENODEV; } else if (ictx->display_isopen) { pr_err("display port is already open\n"); retval = -EBUSY; } else { ictx->display_isopen = true; file->private_data = ictx; dev_dbg(ictx->dev, "display port opened\n"); } mutex_unlock(&ictx->lock); if (retval && refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); exit: return retval; } /* * Called when the display device (e.g. /dev/lcd0) * is closed by the application. 
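 *
 * Note (added for clarity): the final close also drops the ->users
 * reference taken in display_open(), so if imon_disconnect() has already
 * run, this is where free_imon_context() ends up being called.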
*/ static int display_close(struct inode *inode, struct file *file) { struct imon_context *ictx = file->private_data; int retval = 0; mutex_lock(&ictx->lock); if (!ictx->display_supported) { pr_err("display not supported by device\n"); retval = -ENODEV; } else if (!ictx->display_isopen) { pr_err("display is not open\n"); retval = -EIO; } else { ictx->display_isopen = false; dev_dbg(ictx->dev, "display port closed\n"); } mutex_unlock(&ictx->lock); if (refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); return retval; } /* * Sends a packet to the device -- this function must be called with * ictx->lock held, or its unlock/lock sequence while waiting for tx * to complete can/will lead to a deadlock. */ static int send_packet(struct imon_context *ictx) { unsigned int pipe; unsigned long timeout; int interval = 0; int retval = 0; struct usb_ctrlrequest *control_req = NULL; lockdep_assert_held(&ictx->lock); /* Check if we need to use control or interrupt urb */ if (!ictx->tx_control) { pipe = usb_sndintpipe(ictx->usbdev_intf0, ictx->tx_endpoint->bEndpointAddress); interval = ictx->tx_endpoint->bInterval; usb_fill_int_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe, ictx->usb_tx_buf, sizeof(ictx->usb_tx_buf), usb_tx_callback, ictx, interval); ictx->tx_urb->actual_length = 0; } else { /* fill request into kmalloc'ed space: */ control_req = kmalloc(sizeof(*control_req), GFP_KERNEL); if (control_req == NULL) return -ENOMEM; /* setup packet is '21 09 0200 0001 0008' */ control_req->bRequestType = 0x21; control_req->bRequest = 0x09; control_req->wValue = cpu_to_le16(0x0200); control_req->wIndex = cpu_to_le16(0x0001); control_req->wLength = cpu_to_le16(0x0008); /* control pipe is endpoint 0x00 */ pipe = usb_sndctrlpipe(ictx->usbdev_intf0, 0); /* build the control urb */ usb_fill_control_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe, (unsigned char *)control_req, ictx->usb_tx_buf, sizeof(ictx->usb_tx_buf), usb_tx_callback, ictx); ictx->tx_urb->actual_length = 0; } reinit_completion(&ictx->tx.finished); ictx->tx.busy = true; smp_rmb(); /* ensure later readers know we're busy */ retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL); if (retval) { ictx->tx.busy = false; smp_rmb(); /* ensure later readers know we're not busy */ pr_err_ratelimited("error submitting urb(%d)\n", retval); } else { /* Wait for transmission to complete (or abort or timeout) */ retval = wait_for_completion_interruptible_timeout(&ictx->tx.finished, 10 * HZ); if (retval <= 0) { usb_kill_urb(ictx->tx_urb); pr_err_ratelimited("task interrupted\n"); if (retval < 0) ictx->tx.status = retval; else ictx->tx.status = -ETIMEDOUT; } ictx->tx.busy = false; retval = ictx->tx.status; if (retval) pr_err_ratelimited("packet tx failed (%d)\n", retval); } kfree(control_req); /* * Induce a mandatory delay before returning, as otherwise, * send_packet can get called so rapidly as to overwhelm the device, * particularly on faster systems and/or those with quirky usb. */ timeout = msecs_to_jiffies(ictx->send_packet_delay); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(timeout); return retval; } /* * Sends an associate packet to the iMON 2.4G. * * This might not be such a good idea, since it has an id collision with * some versions of the "IR & VFD" combo. The only way to determine if it * is an RF version is to look at the product description string. (Which * we currently do not fetch). 
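 *
 * If someone ever wants to close that gap, one possible (untested) sketch
 * would be to read the product string descriptor and match on it:
 *
 *	char prod[64];
 *	struct usb_device *udev = ictx->usbdev_intf0;
 *
 *	if (usb_string(udev, udev->descriptor.iProduct,
 *		       prod, sizeof(prod)) > 0 && strstr(prod, "RF"))
 *		ictx->rf_device = true;
 *
 * The substring to match on is only a guess, which is why the driver does
 * not do this today.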
*/ static int send_associate_24g(struct imon_context *ictx) { const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20 }; if (!ictx) { pr_err("no context for device\n"); return -ENODEV; } if (!ictx->dev_present_intf0) { pr_err("no iMON device present\n"); return -ENODEV; } memcpy(ictx->usb_tx_buf, packet, sizeof(packet)); return send_packet(ictx); } /* * Sends packets to setup and show clock on iMON display * * Arguments: year - last 2 digits of year, month - 1..12, * day - 1..31, dow - day of the week (0-Sun...6-Sat), * hour - 0..23, minute - 0..59, second - 0..59 */ static int send_set_imon_clock(struct imon_context *ictx, unsigned int year, unsigned int month, unsigned int day, unsigned int dow, unsigned int hour, unsigned int minute, unsigned int second) { unsigned char clock_enable_pkt[IMON_CLOCK_ENABLE_PACKETS][8]; int retval = 0; int i; if (!ictx) { pr_err("no context for device\n"); return -ENODEV; } switch (ictx->display_type) { case IMON_DISPLAY_TYPE_LCD: clock_enable_pkt[0][0] = 0x80; clock_enable_pkt[0][1] = year; clock_enable_pkt[0][2] = month-1; clock_enable_pkt[0][3] = day; clock_enable_pkt[0][4] = hour; clock_enable_pkt[0][5] = minute; clock_enable_pkt[0][6] = second; clock_enable_pkt[1][0] = 0x80; clock_enable_pkt[1][1] = 0; clock_enable_pkt[1][2] = 0; clock_enable_pkt[1][3] = 0; clock_enable_pkt[1][4] = 0; clock_enable_pkt[1][5] = 0; clock_enable_pkt[1][6] = 0; if (ictx->product == 0xffdc) { clock_enable_pkt[0][7] = 0x50; clock_enable_pkt[1][7] = 0x51; } else { clock_enable_pkt[0][7] = 0x88; clock_enable_pkt[1][7] = 0x8a; } break; case IMON_DISPLAY_TYPE_VFD: clock_enable_pkt[0][0] = year; clock_enable_pkt[0][1] = month-1; clock_enable_pkt[0][2] = day; clock_enable_pkt[0][3] = dow; clock_enable_pkt[0][4] = hour; clock_enable_pkt[0][5] = minute; clock_enable_pkt[0][6] = second; clock_enable_pkt[0][7] = 0x40; clock_enable_pkt[1][0] = 0; clock_enable_pkt[1][1] = 0; clock_enable_pkt[1][2] = 1; clock_enable_pkt[1][3] = 0; clock_enable_pkt[1][4] = 0; clock_enable_pkt[1][5] = 0; clock_enable_pkt[1][6] = 0; clock_enable_pkt[1][7] = 0x42; break; default: return -ENODEV; } for (i = 0; i < IMON_CLOCK_ENABLE_PACKETS; i++) { memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8); retval = send_packet(ictx); if (retval) { pr_err("send_packet failed for packet %d\n", i); break; } } return retval; } /* * These are the sysfs functions to handle the association on the iMON 2.4G LT. 
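 *
 * Usage sketch (for illustration; the attribute name matches what is
 * registered further down, the exact sysfs path depends on USB topology):
 *
 *	# cat .../associate_remote
 *	closed
 *	# echo 1 > .../associate_remote
 *
 * Any write arms association mode; reading back reports "associating"
 * until the remote's reply frame is seen in imon_incoming_packet().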
 */
static ssize_t associate_remote_show(struct device *d,
				     struct device_attribute *attr,
				     char *buf)
{
	struct imon_context *ictx = dev_get_drvdata(d);

	if (!ictx)
		return -ENODEV;

	mutex_lock(&ictx->lock);
	if (ictx->rf_isassociating)
		strscpy(buf, "associating\n", PAGE_SIZE);
	else
		strscpy(buf, "closed\n", PAGE_SIZE);

	dev_info(d, "Visit https://www.lirc.org/html/imon-24g.html for instructions on how to associate your iMON 2.4G DT/LT remote\n");
	mutex_unlock(&ictx->lock);
	return strlen(buf);
}

static ssize_t associate_remote_store(struct device *d,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct imon_context *ictx;

	ictx = dev_get_drvdata(d);

	if (!ictx)
		return -ENODEV;

	mutex_lock(&ictx->lock);
	ictx->rf_isassociating = true;
	send_associate_24g(ictx);
	mutex_unlock(&ictx->lock);

	return count;
}

/*
 * sysfs functions to control internal imon clock
 */
static ssize_t imon_clock_show(struct device *d,
			       struct device_attribute *attr, char *buf)
{
	struct imon_context *ictx = dev_get_drvdata(d);
	size_t len;

	if (!ictx)
		return -ENODEV;

	mutex_lock(&ictx->lock);

	if (!ictx->display_supported) {
		len = sysfs_emit(buf, "Not supported.");
	} else {
		len = sysfs_emit(buf,
			"To set the clock on your iMON display:\n"
			"# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n"
			"%s", ictx->display_isopen ?
			"\nNOTE: imon device must be closed\n" : "");
	}

	mutex_unlock(&ictx->lock);

	return len;
}

static ssize_t imon_clock_store(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct imon_context *ictx = dev_get_drvdata(d);
	ssize_t retval;
	unsigned int year, month, day, dow, hour, minute, second;

	if (!ictx)
		return -ENODEV;

	mutex_lock(&ictx->lock);

	if (!ictx->display_supported) {
		retval = -ENODEV;
		goto exit;
	} else if (ictx->display_isopen) {
		retval = -EBUSY;
		goto exit;
	}

	if (sscanf(buf, "%u %u %u %u %u %u %u", &year, &month, &day, &dow,
		   &hour, &minute, &second) != 7) {
		retval = -EINVAL;
		goto exit;
	}

	if ((month < 1 || month > 12) ||
	    (day < 1 || day > 31) || (dow > 6) ||
	    (hour > 23) || (minute > 59) || (second > 59)) {
		retval = -EINVAL;
		goto exit;
	}

	retval = send_set_imon_clock(ictx, year, month, day, dow,
				     hour, minute, second);
	if (retval)
		goto exit;

	retval = count;
exit:
	mutex_unlock(&ictx->lock);

	return retval;
}

static DEVICE_ATTR_RW(imon_clock);
static DEVICE_ATTR_RW(associate_remote);

static struct attribute *imon_display_sysfs_entries[] = {
	&dev_attr_imon_clock.attr,
	NULL
};

static const struct attribute_group imon_display_attr_group = {
	.attrs = imon_display_sysfs_entries
};

static struct attribute *imon_rf_sysfs_entries[] = {
	&dev_attr_associate_remote.attr,
	NULL
};

static const struct attribute_group imon_rf_attr_group = {
	.attrs = imon_rf_sysfs_entries
};

/*
 * Writes data to the VFD. The iMON VFD is 2x16 characters
 * and requires data in 5 consecutive USB interrupt packets,
 * each packet but the last carrying 7 bytes.
 *
 * I don't know if the VFD board supports features such as
 * scrolling, clearing rows, blanking, etc., so the caller must
 * provide a full screen of data. If fewer than 32 bytes are
 * provided, spaces will be appended to generate a full screen.
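 *
 * For illustration only (user space, not part of this driver), writing a
 * padded line to the character device registered below (DEVICE_NAME is
 * "lcd%d", so typically /dev/lcd0):
 *
 *	char screen[32];
 *	int fd = open("/dev/lcd0", O_WRONLY);
 *
 *	memset(screen, ' ', sizeof(screen));
 *	memcpy(screen, "Hello, iMON", 11);
 *	if (fd >= 0 && write(fd, screen, sizeof(screen)) < 0)
 *		perror("vfd write");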
*/ static ssize_t vfd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos) { int i; int offset; int seq; int retval = 0; struct imon_context *ictx = file->private_data; static const unsigned char vfd_packet6[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; if (ictx->disconnected) return -ENODEV; if (mutex_lock_interruptible(&ictx->lock)) return -ERESTARTSYS; if (!ictx->dev_present_intf0) { pr_err_ratelimited("no iMON device present\n"); retval = -ENODEV; goto exit; } if (n_bytes <= 0 || n_bytes > 32) { pr_err_ratelimited("invalid payload size\n"); retval = -EINVAL; goto exit; } if (copy_from_user(ictx->tx.data_buf, buf, n_bytes)) { retval = -EFAULT; goto exit; } /* Pad with spaces */ for (i = n_bytes; i < 32; ++i) ictx->tx.data_buf[i] = ' '; for (i = 32; i < 35; ++i) ictx->tx.data_buf[i] = 0xFF; offset = 0; seq = 0; do { memcpy(ictx->usb_tx_buf, ictx->tx.data_buf + offset, 7); ictx->usb_tx_buf[7] = (unsigned char) seq; retval = send_packet(ictx); if (retval) { pr_err_ratelimited("send packet #%d failed\n", seq / 2); goto exit; } else { seq += 2; offset += 7; } } while (offset < 35); /* Send packet #6 */ memcpy(ictx->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); ictx->usb_tx_buf[7] = (unsigned char) seq; retval = send_packet(ictx); if (retval) pr_err_ratelimited("send packet #%d failed\n", seq / 2); exit: mutex_unlock(&ictx->lock); return (!retval) ? n_bytes : retval; } /* * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte * packets. We accept data as 16 hexadecimal digits, followed by a * newline (to make it easy to drive the device from a command-line * -- even though the actual binary data is a bit complicated). * * The device itself is not a "traditional" text-mode display. It's * actually a 16x96 pixel bitmap display. That means if you want to * display text, you've got to have your own "font" and translate the * text into bitmaps for display. This is really flexible (you can * display whatever diacritics you need, and so on), but it's also * a lot more complicated than most LCDs... */ static ssize_t lcd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos) { int retval = 0; struct imon_context *ictx = file->private_data; if (ictx->disconnected) return -ENODEV; mutex_lock(&ictx->lock); if (!ictx->display_supported) { pr_err_ratelimited("no iMON display present\n"); retval = -ENODEV; goto exit; } if (n_bytes != 8) { pr_err_ratelimited("invalid payload size: %d (expected 8)\n", (int)n_bytes); retval = -EINVAL; goto exit; } if (copy_from_user(ictx->usb_tx_buf, buf, 8)) { retval = -EFAULT; goto exit; } retval = send_packet(ictx); if (retval) { pr_err_ratelimited("send packet failed!\n"); goto exit; } else { dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n", __func__, (int) n_bytes); } exit: mutex_unlock(&ictx->lock); return (!retval) ? 
n_bytes : retval; } /* * Callback function for USB core API: transmit data */ static void usb_tx_callback(struct urb *urb) { struct imon_context *ictx; if (!urb) return; ictx = (struct imon_context *)urb->context; if (!ictx) return; ictx->tx.status = urb->status; /* notify waiters that write has finished */ ictx->tx.busy = false; smp_rmb(); /* ensure later readers know we're not busy */ complete(&ictx->tx.finished); } /* * report touchscreen input */ static void imon_touch_display_timeout(struct timer_list *t) { struct imon_context *ictx = timer_container_of(ictx, t, ttimer); if (ictx->display_type != IMON_DISPLAY_TYPE_VGA) return; input_report_abs(ictx->touch, ABS_X, ictx->touch_x); input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); input_report_key(ictx->touch, BTN_TOUCH, 0x00); input_sync(ictx->touch); } /* * iMON IR receivers support two different signal sets -- those used by * the iMON remotes, and those used by the Windows MCE remotes (which is * really just RC-6), but only one or the other at a time, as the signals * are decoded onboard the receiver. * * This function gets called two different ways, one way is from * rc_register_device, for initial protocol selection/setup, and the other is * via a userspace-initiated protocol change request, either by direct sysfs * prodding or by something like ir-keytable. In the rc_register_device case, * the imon context lock is already held, but when initiated from userspace, * it is not, so we must acquire it prior to calling send_packet, which * requires that the lock is held. */ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto) { int retval; struct imon_context *ictx = rc->priv; struct device *dev = ictx->dev; const bool unlock = mutex_trylock(&ictx->lock); unsigned char ir_proto_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; if (*rc_proto && !(*rc_proto & rc->allowed_protocols)) dev_warn(dev, "Looks like you're trying to use an IR protocol this device does not support\n"); if (*rc_proto & RC_PROTO_BIT_RC6_MCE) { dev_dbg(dev, "Configuring IR receiver for MCE protocol\n"); ir_proto_packet[0] = 0x01; *rc_proto = RC_PROTO_BIT_RC6_MCE; } else if (*rc_proto & RC_PROTO_BIT_IMON) { dev_dbg(dev, "Configuring IR receiver for iMON protocol\n"); if (!pad_stabilize) dev_dbg(dev, "PAD stabilize functionality disabled\n"); /* ir_proto_packet[0] = 0x00; // already the default */ *rc_proto = RC_PROTO_BIT_IMON; } else { dev_warn(dev, "Unsupported IR protocol specified, overriding to iMON IR protocol\n"); if (!pad_stabilize) dev_dbg(dev, "PAD stabilize functionality disabled\n"); /* ir_proto_packet[0] = 0x00; // already the default */ *rc_proto = RC_PROTO_BIT_IMON; } memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); retval = send_packet(ictx); if (retval) goto out; ictx->rc_proto = *rc_proto; ictx->pad_mouse = false; out: if (unlock) mutex_unlock(&ictx->lock); return retval; } /* * The directional pad behaves a bit differently, depending on whether this is * one of the older ffdc devices or a newer device. Newer devices appear to * have a higher resolution matrix for more precise mouse movement, but it * makes things overly sensitive in keyboard mode, so we do some interesting * contortions to make it less touchy. Older devices run through the same * routine with shorter timeout and a smaller threshold. 
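 *
 * Worked example with the defaults used below (pad_thresh unset, newer
 * device, threshold 28): accumulated deltas of x = 3, y = -31 exceed the
 * threshold on |y|; y < 0 maps to 0x80, which imon_pad_to_keys() turns
 * into scancode 0x01008000, i.e. KEY_UP.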
*/ static int stabilize(int a, int b, u16 timeout, u16 threshold) { ktime_t ct; static ktime_t prev_time; static ktime_t hit_time; static int x, y, prev_result, hits; int result = 0; long msec, msec_hit; ct = ktime_get(); msec = ktime_ms_delta(ct, prev_time); msec_hit = ktime_ms_delta(ct, hit_time); if (msec > 100) { x = 0; y = 0; hits = 0; } x += a; y += b; prev_time = ct; if (abs(x) > threshold || abs(y) > threshold) { if (abs(y) > abs(x)) result = (y > 0) ? 0x7F : 0x80; else result = (x > 0) ? 0x7F00 : 0x8000; x = 0; y = 0; if (result == prev_result) { hits++; if (hits > 3) { switch (result) { case 0x7F: y = 17 * threshold / 30; break; case 0x80: y -= 17 * threshold / 30; break; case 0x7F00: x = 17 * threshold / 30; break; case 0x8000: x -= 17 * threshold / 30; break; } } if (hits == 2 && msec_hit < timeout) { result = 0; hits = 1; } } else { prev_result = result; hits = 1; hit_time = ct; } } return result; } static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 scancode) { u32 keycode; u32 release; bool is_release_code = false; /* Look for the initial press of a button */ keycode = rc_g_keycode_from_table(ictx->rdev, scancode); ictx->rc_toggle = 0x0; ictx->rc_scancode = scancode; /* Look for the release of a button */ if (keycode == KEY_RESERVED) { release = scancode & ~0x4000; keycode = rc_g_keycode_from_table(ictx->rdev, release); if (keycode != KEY_RESERVED) is_release_code = true; } ictx->release_code = is_release_code; return keycode; } static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode) { u32 keycode; #define MCE_KEY_MASK 0x7000 #define MCE_TOGGLE_BIT 0x8000 /* * On some receivers, mce keys decode to 0x8000f04xx and 0x8000f84xx * (the toggle bit flipping between alternating key presses), while * on other receivers, we see 0x8000f74xx and 0x8000ff4xx. To keep * the table trim, we always or in the bits to look up 0x8000ff4xx, * but we can't or them into all codes, as some keys are decoded in * a different way w/o the same use of the toggle bit... 
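 *
 * Concretely, OR-ing in MCE_KEY_MASK | MCE_TOGGLE_BIT forces bits 0x7000
 * and 0x8000 on, so the f04xx, f84xx and f74xx variants of a key all
 * collapse to the single ff4xx form before the keymap lookup.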
*/ if (scancode & 0x80000000) scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT; ictx->rc_scancode = scancode; keycode = rc_g_keycode_from_table(ictx->rdev, scancode); /* not used in mce mode, but make sure we know its false */ ictx->release_code = false; return keycode; } static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code) { const struct imon_panel_key_table *key_table; u32 keycode = KEY_RESERVED; int i; key_table = ictx->dev_descr->key_table; for (i = 0; key_table[i].hw_code != 0; i++) { if (key_table[i].hw_code == (code | 0xffee)) { keycode = key_table[i].keycode; break; } } ictx->release_code = false; return keycode; } static bool imon_mouse_event(struct imon_context *ictx, unsigned char *buf, int len) { signed char rel_x = 0x00, rel_y = 0x00; u8 right_shift = 1; bool mouse_input = true; int dir = 0; unsigned long flags; spin_lock_irqsave(&ictx->kc_lock, flags); /* newer iMON device PAD or mouse button */ if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) { rel_x = buf[2]; rel_y = buf[3]; right_shift = 1; /* 0xffdc iMON PAD or mouse button input */ } else if (ictx->product == 0xffdc && (buf[0] & 0x40) && !((buf[1] & 0x01) || ((buf[1] >> 2) & 0x01))) { rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 | (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6; if (buf[0] & 0x02) rel_x |= ~0x0f; rel_x = rel_x + rel_x / 2; rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 | (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6; if (buf[0] & 0x01) rel_y |= ~0x0f; rel_y = rel_y + rel_y / 2; right_shift = 2; /* some ffdc devices decode mouse buttons differently... */ } else if (ictx->product == 0xffdc && (buf[0] == 0x68)) { right_shift = 2; /* ch+/- buttons, which we use for an emulated scroll wheel */ } else if (ictx->kc == KEY_CHANNELUP && (buf[2] & 0x40) != 0x40) { dir = 1; } else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) { dir = -1; } else mouse_input = false; spin_unlock_irqrestore(&ictx->kc_lock, flags); if (mouse_input) { dev_dbg(ictx->dev, "sending mouse data via input subsystem\n"); if (dir) { input_report_rel(ictx->idev, REL_WHEEL, dir); } else if (rel_x || rel_y) { input_report_rel(ictx->idev, REL_X, rel_x); input_report_rel(ictx->idev, REL_Y, rel_y); } else { input_report_key(ictx->idev, BTN_LEFT, buf[1] & 0x1); input_report_key(ictx->idev, BTN_RIGHT, buf[1] >> right_shift & 0x1); } input_sync(ictx->idev); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); } return mouse_input; } static void imon_touch_event(struct imon_context *ictx, unsigned char *buf) { mod_timer(&ictx->ttimer, jiffies + TOUCH_TIMEOUT); ictx->touch_x = (buf[0] << 4) | (buf[1] >> 4); ictx->touch_y = 0xfff - ((buf[2] << 4) | (buf[1] & 0xf)); input_report_abs(ictx->touch, ABS_X, ictx->touch_x); input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); input_report_key(ictx->touch, BTN_TOUCH, 0x01); input_sync(ictx->touch); } static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf) { int dir = 0; signed char rel_x = 0x00, rel_y = 0x00; u16 timeout, threshold; u32 scancode = KEY_RESERVED; unsigned long flags; /* * The imon directional pad functions more like a touchpad. Bytes 3 & 4 * contain a position coordinate (x,y), with each component ranging * from -14 to 14. We want to down-sample this to only 4 discrete values * for up/down/left/right arrow keys. Also, when you get too close to * diagonals, it has a tendency to jump back and forth, so lets try to * ignore when they get too close. 
*/ if (ictx->product != 0xffdc) { /* first, pad to 8 bytes so it conforms with everything else */ buf[5] = buf[6] = buf[7] = 0; timeout = 500; /* in msecs */ /* (2*threshold) x (2*threshold) square */ threshold = pad_thresh ? pad_thresh : 28; rel_x = buf[2]; rel_y = buf[3]; if (ictx->rc_proto == RC_PROTO_BIT_IMON && pad_stabilize) { if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) { dir = stabilize((int)rel_x, (int)rel_y, timeout, threshold); if (!dir) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = KEY_UNKNOWN; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } buf[2] = dir & 0xFF; buf[3] = (dir >> 8) & 0xFF; scancode = be32_to_cpu(*((__be32 *)buf)); } } else { /* * Hack alert: instead of using keycodes, we have * to use hard-coded scancodes here... */ if (abs(rel_y) > abs(rel_x)) { buf[2] = (rel_y > 0) ? 0x7F : 0x80; buf[3] = 0; if (rel_y > 0) scancode = 0x01007f00; /* KEY_DOWN */ else scancode = 0x01008000; /* KEY_UP */ } else { buf[2] = 0; buf[3] = (rel_x > 0) ? 0x7F : 0x80; if (rel_x > 0) scancode = 0x0100007f; /* KEY_RIGHT */ else scancode = 0x01000080; /* KEY_LEFT */ } } /* * Handle on-board decoded pad events for e.g. older VFD/iMON-Pad * device (15c2:ffdc). The remote generates various codes from * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates * 0x688301b7 and the right one 0x688481b7. All other keys generate * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with * reversed endianness. Extract direction from buffer, rotate endianness, * adjust sign and feed the values into stabilize(). The resulting codes * will be 0x01008000, 0x01007F00, which match the newer devices. */ } else { timeout = 10; /* in msecs */ /* (2*threshold) x (2*threshold) square */ threshold = pad_thresh ? pad_thresh : 15; /* buf[1] is x */ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 | (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6; if (buf[0] & 0x02) rel_x |= ~0x10+1; /* buf[2] is y */ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 | (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6; if (buf[0] & 0x01) rel_y |= ~0x10+1; buf[0] = 0x01; buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0; if (ictx->rc_proto == RC_PROTO_BIT_IMON && pad_stabilize) { dir = stabilize((int)rel_x, (int)rel_y, timeout, threshold); if (!dir) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = KEY_UNKNOWN; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } buf[2] = dir & 0xFF; buf[3] = (dir >> 8) & 0xFF; scancode = be32_to_cpu(*((__be32 *)buf)); } else { /* * Hack alert: instead of using keycodes, we have * to use hard-coded scancodes here... */ if (abs(rel_y) > abs(rel_x)) { buf[2] = (rel_y > 0) ? 0x7F : 0x80; buf[3] = 0; if (rel_y > 0) scancode = 0x01007f00; /* KEY_DOWN */ else scancode = 0x01008000; /* KEY_UP */ } else { buf[2] = 0; buf[3] = (rel_x > 0) ? 0x7F : 0x80; if (rel_x > 0) scancode = 0x0100007f; /* KEY_RIGHT */ else scancode = 0x01000080; /* KEY_LEFT */ } } } if (scancode) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = imon_remote_key_lookup(ictx, scancode); spin_unlock_irqrestore(&ictx->kc_lock, flags); } } /* * figure out if these is a press or a release. We don't actually * care about repeats, as those will be auto-generated within the IR * subsystem for repeating scancodes. 
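 *
 * Returns 1 for a press, 0 for a release, and -EINVAL when the buffer
 * turns out not to carry key data at all; the caller then falls through
 * to the non-key handling in imon_incoming_packet().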
*/ static int imon_parse_press_type(struct imon_context *ictx, unsigned char *buf, u8 ktype) { int press_type = 0; unsigned long flags; spin_lock_irqsave(&ictx->kc_lock, flags); /* key release of 0x02XXXXXX key */ if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00) ictx->kc = ictx->last_keycode; /* mouse button release on (some) 0xffdc devices */ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x68 && buf[1] == 0x82 && buf[2] == 0x81 && buf[3] == 0xb7) ictx->kc = ictx->last_keycode; /* mouse button release on (some other) 0xffdc devices */ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x01 && buf[1] == 0x00 && buf[2] == 0x81 && buf[3] == 0xb7) ictx->kc = ictx->last_keycode; /* mce-specific button handling, no keyup events */ else if (ktype == IMON_KEY_MCE) { ictx->rc_toggle = buf[2]; press_type = 1; /* incoherent or irrelevant data */ } else if (ictx->kc == KEY_RESERVED) press_type = -EINVAL; /* key release of 0xXXXXXXb7 key */ else if (ictx->release_code) press_type = 0; /* this is a button press */ else press_type = 1; spin_unlock_irqrestore(&ictx->kc_lock, flags); return press_type; } /* * Process the incoming packet */ static void imon_incoming_packet(struct imon_context *ictx, struct urb *urb, int intf) { int len = urb->actual_length; unsigned char *buf = urb->transfer_buffer; struct device *dev = ictx->dev; unsigned long flags; u32 kc; u64 scancode; int press_type = 0; ktime_t t; static ktime_t prev_time; u8 ktype; /* filter out junk data on the older 0xffdc imon devices */ if ((buf[0] == 0xff) && (buf[1] == 0xff) && (buf[2] == 0xff)) return; /* Figure out what key was pressed */ if (len == 8 && buf[7] == 0xee) { scancode = be64_to_cpu(*((__be64 *)buf)); ktype = IMON_KEY_PANEL; kc = imon_panel_key_lookup(ictx, scancode); ictx->release_code = false; } else { scancode = be32_to_cpu(*((__be32 *)buf)); if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) { ktype = IMON_KEY_IMON; if (buf[0] == 0x80) ktype = IMON_KEY_MCE; kc = imon_mce_key_lookup(ictx, scancode); } else { ktype = IMON_KEY_IMON; kc = imon_remote_key_lookup(ictx, scancode); } } spin_lock_irqsave(&ictx->kc_lock, flags); /* keyboard/mouse mode toggle button */ if (kc == KEY_KEYBOARD && !ictx->release_code) { ictx->last_keycode = kc; if (!nomouse) { ictx->pad_mouse = !ictx->pad_mouse; dev_dbg(dev, "toggling to %s mode\n", ictx->pad_mouse ? 
"mouse" : "keyboard"); spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } else { ictx->pad_mouse = false; dev_dbg(dev, "mouse mode disabled, passing key value\n"); } } ictx->kc = kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); /* send touchscreen events through input subsystem if touchpad data */ if (ictx->touch && len == 8 && buf[7] == 0x86) { imon_touch_event(ictx, buf); return; /* look for mouse events with pad in mouse mode */ } else if (ictx->pad_mouse) { if (imon_mouse_event(ictx, buf, len)) return; } /* Now for some special handling to convert pad input to arrow keys */ if (((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) || ((len == 8) && (buf[0] & 0x40) && !(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) { len = 8; imon_pad_to_keys(ictx, buf); } if (debug) { printk(KERN_INFO "intf%d decoded packet: %*ph\n", intf, len, buf); } press_type = imon_parse_press_type(ictx, buf, ktype); if (press_type < 0) goto not_input_data; if (ktype != IMON_KEY_PANEL) { if (press_type == 0) rc_keyup(ictx->rdev); else { enum rc_proto proto; if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) proto = RC_PROTO_RC6_MCE; else if (ictx->rc_proto == RC_PROTO_BIT_IMON) proto = RC_PROTO_IMON; else return; rc_keydown(ictx->rdev, proto, ictx->rc_scancode, ictx->rc_toggle); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); } return; } /* Only panel type events left to process now */ spin_lock_irqsave(&ictx->kc_lock, flags); t = ktime_get(); /* KEY repeats from knob and panel that need to be suppressed */ if (ictx->kc == KEY_MUTE || ictx->dev_descr->flags & IMON_SUPPRESS_REPEATED_KEYS) { if (ictx->kc == ictx->last_keycode && ktime_ms_delta(t, prev_time) < ictx->idev->rep[REP_DELAY]) { spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } } prev_time = t; kc = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); input_report_key(ictx->idev, kc, press_type); input_sync(ictx->idev); /* panel keys don't generate a release */ input_report_key(ictx->idev, kc, 0); input_sync(ictx->idev); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; not_input_data: if (len != 8) { dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n", __func__, len, intf); return; } /* iMON 2.4G associate frame */ if (buf[0] == 0x00 && buf[2] == 0xFF && /* REFID */ buf[3] == 0xFF && buf[4] == 0xFF && buf[5] == 0xFF && /* iMON 2.4G */ ((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */ (buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */ dev_warn(dev, "%s: remote associated refid=%02X\n", __func__, buf[1]); ictx->rf_isassociating = false; } } /* * Callback function for USB core API: receive data */ static void usb_rx_callback_intf0(struct urb *urb) { struct imon_context *ictx; int intfnum = 0; if (!urb) return; ictx = (struct imon_context *)urb->context; if (!ictx) return; switch (urb->status) { case -ENOENT: /* usbcore unlink successful! 
*/ return; case -ESHUTDOWN: /* transport endpoint was shut down */ break; case 0: /* * if we get a callback before we're done configuring the hardware, we * can't yet process the data, as there's nowhere to send it, but we * still need to submit a new rx URB to avoid wedging the hardware */ if (ictx->dev_present_intf0) imon_incoming_packet(ictx, urb, intfnum); break; case -ECONNRESET: case -EILSEQ: case -EPROTO: case -EPIPE: dev_warn(ictx->dev, "imon %s: status(%d)\n", __func__, urb->status); return; default: dev_warn(ictx->dev, "imon %s: status(%d): ignored\n", __func__, urb->status); break; } usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC); } static void usb_rx_callback_intf1(struct urb *urb) { struct imon_context *ictx; int intfnum = 1; if (!urb) return; ictx = (struct imon_context *)urb->context; if (!ictx) return; switch (urb->status) { case -ENOENT: /* usbcore unlink successful! */ return; case -ESHUTDOWN: /* transport endpoint was shut down */ break; case 0: /* * if we get a callback before we're done configuring the hardware, we * can't yet process the data, as there's nowhere to send it, but we * still need to submit a new rx URB to avoid wedging the hardware */ if (ictx->dev_present_intf1) imon_incoming_packet(ictx, urb, intfnum); break; case -ECONNRESET: case -EILSEQ: case -EPROTO: case -EPIPE: dev_warn(ictx->dev, "imon %s: status(%d)\n", __func__, urb->status); return; default: dev_warn(ictx->dev, "imon %s: status(%d): ignored\n", __func__, urb->status); break; } usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC); } /* * The 0x15c2:0xffdc device ID was used for umpteen different imon * devices, and all of them constantly spew interrupts, even when there * is no actual data to report. However, byte 6 of this buffer looks like * its unique across device variants, so we're trying to key off that to * figure out which display type (if any) and what IR protocol the device * actually supports. These devices have their IR protocol hard-coded into * their firmware, they can't be changed on the fly like the newer hardware. 
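 *
 * For example, a config byte of 0x9e is treated below as "VFD display,
 * MCE IR only", while 0x21 means "volume knob, iMON IR, no display".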
*/ static void imon_get_ffdc_type(struct imon_context *ictx) { u8 ffdc_cfg_byte = ictx->usb_rx_buf[6]; u8 detected_display_type = IMON_DISPLAY_TYPE_NONE; u64 allowed_protos = RC_PROTO_BIT_IMON; switch (ffdc_cfg_byte) { /* iMON Knob, no display, iMON IR + vol knob */ case 0x21: dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR"); ictx->display_supported = false; break; /* iMON 2.4G LT (usb stick), no display, iMON RF */ case 0x4e: dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF"); ictx->display_supported = false; ictx->rf_device = true; break; /* iMON VFD, no IR (does have vol knob tho) */ case 0x35: dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; break; /* iMON VFD, iMON IR */ case 0x24: case 0x30: case 0x85: dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; break; /* iMON VFD, MCE IR */ case 0x46: case 0x9e: dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; allowed_protos = RC_PROTO_BIT_RC6_MCE; break; /* iMON VFD, iMON or MCE IR */ case 0x7e: dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; allowed_protos |= RC_PROTO_BIT_RC6_MCE; break; /* iMON LCD, MCE IR */ case 0x9f: dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_LCD; allowed_protos = RC_PROTO_BIT_RC6_MCE; break; /* no display, iMON IR */ case 0x26: dev_info(ictx->dev, "0xffdc iMON Inside, iMON IR"); ictx->display_supported = false; break; /* Soundgraph iMON UltraBay */ case 0x98: dev_info(ictx->dev, "0xffdc iMON UltraBay, LCD + IR"); detected_display_type = IMON_DISPLAY_TYPE_LCD; allowed_protos = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE; ictx->dev_descr = &ultrabay_table; break; default: dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; /* * We don't know which one it is, allow user to set the * RC6 one from userspace if IMON wasn't correct. */ allowed_protos |= RC_PROTO_BIT_RC6_MCE; break; } printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte); ictx->display_type = detected_display_type; ictx->rc_proto = allowed_protos; } static void imon_set_display_type(struct imon_context *ictx) { u8 configured_display_type = IMON_DISPLAY_TYPE_VFD; /* * Try to auto-detect the type of display if the user hasn't set * it by hand via the display_type modparam. Default is VFD. 
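 *
 * e.g. loading the module with display_type=4 (IMON_DISPLAY_TYPE_NONE)
 * skips display setup entirely, while display_type=1 forces VFD handling
 * even if auto-detection would have chosen something else.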
*/ if (display_type == IMON_DISPLAY_TYPE_AUTO) { switch (ictx->product) { case 0xffdc: /* set in imon_get_ffdc_type() */ configured_display_type = ictx->display_type; break; case 0x0034: case 0x0035: configured_display_type = IMON_DISPLAY_TYPE_VGA; break; case 0x0038: case 0x0039: case 0x0045: configured_display_type = IMON_DISPLAY_TYPE_LCD; break; case 0x003c: case 0x0041: case 0x0042: case 0x0043: configured_display_type = IMON_DISPLAY_TYPE_NONE; ictx->display_supported = false; break; case 0x0036: case 0x0044: default: configured_display_type = IMON_DISPLAY_TYPE_VFD; break; } } else { configured_display_type = display_type; if (display_type == IMON_DISPLAY_TYPE_NONE) ictx->display_supported = false; else ictx->display_supported = true; dev_info(ictx->dev, "%s: overriding display type to %d via modparam\n", __func__, display_type); } ictx->display_type = configured_display_type; } static struct rc_dev *imon_init_rdev(struct imon_context *ictx) { struct rc_dev *rdev; int ret; static const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88 }; rdev = rc_allocate_device(RC_DRIVER_SCANCODE); if (!rdev) { dev_err(ictx->dev, "remote control dev allocation failed\n"); goto out; } snprintf(ictx->name_rdev, sizeof(ictx->name_rdev), "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product); usb_make_path(ictx->usbdev_intf0, ictx->phys_rdev, sizeof(ictx->phys_rdev)); strlcat(ictx->phys_rdev, "/input0", sizeof(ictx->phys_rdev)); rdev->device_name = ictx->name_rdev; rdev->input_phys = ictx->phys_rdev; usb_to_input_id(ictx->usbdev_intf0, &rdev->input_id); rdev->dev.parent = ictx->dev; rdev->priv = ictx; /* iMON PAD or MCE */ rdev->allowed_protocols = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE; rdev->change_protocol = imon_ir_change_protocol; rdev->driver_name = MOD_NAME; /* Enable front-panel buttons and/or knobs */ memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet)); ret = send_packet(ictx); /* Not fatal, but warn about it */ if (ret) dev_info(ictx->dev, "panel buttons/knobs setup failed\n"); if (ictx->product == 0xffdc) { imon_get_ffdc_type(ictx); rdev->allowed_protocols = ictx->rc_proto; } imon_set_display_type(ictx); if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) rdev->map_name = RC_MAP_IMON_MCE; else rdev->map_name = RC_MAP_IMON_PAD; ret = rc_register_device(rdev); if (ret < 0) { dev_err(ictx->dev, "remote input dev register failed\n"); goto out; } return rdev; out: rc_free_device(rdev); return NULL; } static struct input_dev *imon_init_idev(struct imon_context *ictx) { const struct imon_panel_key_table *key_table; struct input_dev *idev; int ret, i; key_table = ictx->dev_descr->key_table; idev = input_allocate_device(); if (!idev) goto out; snprintf(ictx->name_idev, sizeof(ictx->name_idev), "iMON Panel, Knob and Mouse(%04x:%04x)", ictx->vendor, ictx->product); idev->name = ictx->name_idev; usb_make_path(ictx->usbdev_intf0, ictx->phys_idev, sizeof(ictx->phys_idev)); strlcat(ictx->phys_idev, "/input1", sizeof(ictx->phys_idev)); idev->phys = ictx->phys_idev; idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL); idev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) | BIT_MASK(REL_WHEEL); /* panel and/or knob code support */ for (i = 0; key_table[i].hw_code != 0; i++) { u32 kc = key_table[i].keycode; __set_bit(kc, idev->keybit); } usb_to_input_id(ictx->usbdev_intf0, &idev->id); idev->dev.parent = ictx->dev; input_set_drvdata(idev, ictx); ret = input_register_device(idev); if (ret < 0) 
{ dev_err(ictx->dev, "input dev register failed\n"); goto out; } return idev; out: input_free_device(idev); return NULL; } static struct input_dev *imon_init_touch(struct imon_context *ictx) { struct input_dev *touch; int ret; touch = input_allocate_device(); if (!touch) goto touch_alloc_failed; snprintf(ictx->name_touch, sizeof(ictx->name_touch), "iMON USB Touchscreen (%04x:%04x)", ictx->vendor, ictx->product); touch->name = ictx->name_touch; usb_make_path(ictx->usbdev_intf1, ictx->phys_touch, sizeof(ictx->phys_touch)); strlcat(ictx->phys_touch, "/input2", sizeof(ictx->phys_touch)); touch->phys = ictx->phys_touch; touch->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); touch->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(touch, ABS_X, 0x00, 0xfff, 0, 0); input_set_abs_params(touch, ABS_Y, 0x00, 0xfff, 0, 0); input_set_drvdata(touch, ictx); usb_to_input_id(ictx->usbdev_intf1, &touch->id); touch->dev.parent = ictx->dev; ret = input_register_device(touch); if (ret < 0) { dev_info(ictx->dev, "touchscreen input dev register failed\n"); goto touch_register_failed; } return touch; touch_register_failed: input_free_device(touch); touch_alloc_failed: return NULL; } static bool imon_find_endpoints(struct imon_context *ictx, struct usb_host_interface *iface_desc) { struct usb_endpoint_descriptor *ep; struct usb_endpoint_descriptor *rx_endpoint = NULL; struct usb_endpoint_descriptor *tx_endpoint = NULL; int ifnum = iface_desc->desc.bInterfaceNumber; int num_endpts = iface_desc->desc.bNumEndpoints; int i, ep_dir, ep_type; bool ir_ep_found = false; bool display_ep_found = false; bool tx_control = false; /* * Scan the endpoint list and set: * first input endpoint = IR endpoint * first output endpoint = display endpoint */ for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) { ep = &iface_desc->endpoint[i].desc; ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK; ep_type = usb_endpoint_type(ep); if (!ir_ep_found && ep_dir == USB_DIR_IN && ep_type == USB_ENDPOINT_XFER_INT) { rx_endpoint = ep; ir_ep_found = true; dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__); } else if (!display_ep_found && ep_dir == USB_DIR_OUT && ep_type == USB_ENDPOINT_XFER_INT) { tx_endpoint = ep; display_ep_found = true; dev_dbg(ictx->dev, "%s: found display endpoint\n", __func__); } } if (ifnum == 0) { ictx->rx_endpoint_intf0 = rx_endpoint; /* * tx is used to send characters to lcd/vfd, associate RF * remotes, set IR protocol, and maybe more... */ ictx->tx_endpoint = tx_endpoint; } else { ictx->rx_endpoint_intf1 = rx_endpoint; } /* * If we didn't find a display endpoint, this is probably one of the * newer iMON devices that use control urb instead of interrupt */ if (!display_ep_found) { tx_control = true; display_ep_found = true; dev_dbg(ictx->dev, "%s: device uses control endpoint, not interface OUT endpoint\n", __func__); } /* * Some iMON receivers have no display. Unfortunately, it seems * that SoundGraph recycles device IDs between devices both with * and without... :\ */ if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) { display_ep_found = false; dev_dbg(ictx->dev, "%s: device has no display\n", __func__); } /* * iMON Touch devices have a VGA touchscreen, but no "display", as * that refers to e.g. /dev/lcd0 (a character device LCD or VFD). 
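 * The touchscreen itself is exposed through the separate input device
 * registered in imon_init_touch().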
*/ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { display_ep_found = false; dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__); } /* Input endpoint is mandatory */ if (!ir_ep_found) pr_err("no valid input (IR) endpoint found\n"); ictx->tx_control = tx_control; if (display_ep_found) ictx->display_supported = true; return ir_ep_found; } static struct imon_context *imon_init_intf0(struct usb_interface *intf, const struct usb_device_id *id) { struct imon_context *ictx; struct urb *rx_urb; struct urb *tx_urb; struct device *dev = &intf->dev; struct usb_host_interface *iface_desc; int ret = -ENOMEM; ictx = kzalloc(sizeof(*ictx), GFP_KERNEL); if (!ictx) goto exit; rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rx_urb) goto rx_urb_alloc_failed; tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!tx_urb) goto tx_urb_alloc_failed; mutex_init(&ictx->lock); spin_lock_init(&ictx->kc_lock); mutex_lock(&ictx->lock); ictx->dev = dev; ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf)); ictx->rx_urb_intf0 = rx_urb; ictx->tx_urb = tx_urb; ictx->rf_device = false; init_completion(&ictx->tx.finished); ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor); ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct); /* save drive info for later accessing the panel/knob key table */ ictx->dev_descr = (struct imon_usb_dev_descr *)id->driver_info; /* default send_packet delay is 5ms but some devices need more */ ictx->send_packet_delay = ictx->dev_descr->flags & IMON_NEED_20MS_PKT_DELAY ? 20 : 5; ret = -ENODEV; iface_desc = intf->cur_altsetting; if (!imon_find_endpoints(ictx, iface_desc)) { goto find_endpoint_failed; } usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0, usb_rcvintpipe(ictx->usbdev_intf0, ictx->rx_endpoint_intf0->bEndpointAddress), ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), usb_rx_callback_intf0, ictx, ictx->rx_endpoint_intf0->bInterval); ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL); if (ret) { pr_err("usb_submit_urb failed for intf0 (%d)\n", ret); goto urb_submit_failed; } ictx->idev = imon_init_idev(ictx); if (!ictx->idev) { dev_err(dev, "%s: input device setup failed\n", __func__); goto idev_setup_failed; } ictx->rdev = imon_init_rdev(ictx); if (!ictx->rdev) { dev_err(dev, "%s: rc device setup failed\n", __func__); goto rdev_setup_failed; } ictx->dev_present_intf0 = true; mutex_unlock(&ictx->lock); return ictx; rdev_setup_failed: input_unregister_device(ictx->idev); idev_setup_failed: usb_kill_urb(ictx->rx_urb_intf0); urb_submit_failed: find_endpoint_failed: usb_put_dev(ictx->usbdev_intf0); mutex_unlock(&ictx->lock); usb_free_urb(tx_urb); tx_urb_alloc_failed: usb_free_urb(rx_urb); rx_urb_alloc_failed: kfree(ictx); exit: dev_err(dev, "unable to initialize intf0, err %d\n", ret); return NULL; } static struct imon_context *imon_init_intf1(struct usb_interface *intf, struct imon_context *ictx) { struct urb *rx_urb; struct usb_host_interface *iface_desc; int ret = -ENOMEM; rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rx_urb) goto rx_urb_alloc_failed; mutex_lock(&ictx->lock); if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { timer_setup(&ictx->ttimer, imon_touch_display_timeout, 0); } ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf)); ictx->rx_urb_intf1 = rx_urb; ret = -ENODEV; iface_desc = intf->cur_altsetting; if (!imon_find_endpoints(ictx, iface_desc)) goto find_endpoint_failed; if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { ictx->touch = imon_init_touch(ictx); if (!ictx->touch) goto touch_setup_failed; } else ictx->touch = NULL; 
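	/*
	 * Arm the interface 1 interrupt URB against the shared 8-byte
	 * usb_rx_buf and start receiving, mirroring the interface 0 setup
	 * in imon_init_intf0().
	 */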
usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1, usb_rcvintpipe(ictx->usbdev_intf1, ictx->rx_endpoint_intf1->bEndpointAddress), ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), usb_rx_callback_intf1, ictx, ictx->rx_endpoint_intf1->bInterval); ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL); if (ret) { pr_err("usb_submit_urb failed for intf1 (%d)\n", ret); goto urb_submit_failed; } ictx->dev_present_intf1 = true; mutex_unlock(&ictx->lock); return ictx; urb_submit_failed: if (ictx->touch) input_unregister_device(ictx->touch); touch_setup_failed: find_endpoint_failed: usb_put_dev(ictx->usbdev_intf1); ictx->usbdev_intf1 = NULL; mutex_unlock(&ictx->lock); usb_free_urb(rx_urb); ictx->rx_urb_intf1 = NULL; rx_urb_alloc_failed: dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret); return NULL; } static void imon_init_display(struct imon_context *ictx, struct usb_interface *intf) { int ret; dev_dbg(ictx->dev, "Registering iMON display with sysfs\n"); /* set up sysfs entry for built-in clock */ ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group); if (ret) dev_err(ictx->dev, "Could not create display sysfs entries(%d)", ret); if (ictx->display_type == IMON_DISPLAY_TYPE_LCD) ret = usb_register_dev(intf, &imon_lcd_class); else ret = usb_register_dev(intf, &imon_vfd_class); if (ret) /* Not a fatal error, so ignore */ dev_info(ictx->dev, "could not get a minor number for display\n"); } /* * Callback function for USB core API: Probe */ static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usbdev = NULL; struct usb_host_interface *iface_desc = NULL; struct usb_interface *first_if; struct device *dev = &interface->dev; int ifnum, sysfs_err; int ret = 0; struct imon_context *ictx = NULL; u16 vendor, product; usbdev = usb_get_dev(interface_to_usbdev(interface)); iface_desc = interface->cur_altsetting; ifnum = iface_desc->desc.bInterfaceNumber; vendor = le16_to_cpu(usbdev->descriptor.idVendor); product = le16_to_cpu(usbdev->descriptor.idProduct); dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n", __func__, vendor, product, ifnum); first_if = usb_ifnum_to_if(usbdev, 0); if (!first_if) { ret = -ENODEV; goto fail; } if (first_if->dev.driver != interface->dev.driver) { dev_err(&interface->dev, "inconsistent driver matching\n"); ret = -EINVAL; goto fail; } if (ifnum == 0) { ictx = imon_init_intf0(interface, id); if (!ictx) { pr_err("failed to initialize context!\n"); ret = -ENODEV; goto fail; } refcount_set(&ictx->users, 1); } else { /* this is the secondary interface on the device */ struct imon_context *first_if_ctx = usb_get_intfdata(first_if); /* fail early if first intf failed to register */ if (!first_if_ctx) { ret = -ENODEV; goto fail; } ictx = imon_init_intf1(interface, first_if_ctx); if (!ictx) { pr_err("failed to attach to context!\n"); ret = -ENODEV; goto fail; } refcount_inc(&ictx->users); } usb_set_intfdata(interface, ictx); if (ifnum == 0) { if (product == 0xffdc && ictx->rf_device) { sysfs_err = sysfs_create_group(&interface->dev.kobj, &imon_rf_attr_group); if (sysfs_err) pr_err("Could not create RF sysfs entries(%d)\n", sysfs_err); } if (ictx->display_supported) imon_init_display(ictx, interface); } dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n", vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); usb_put_dev(usbdev); return 0; fail: usb_put_dev(usbdev); dev_err(dev, "unable to register, err %d\n", ret); return ret; } /* * Callback function for USB core API: 
disconnect */ static void imon_disconnect(struct usb_interface *interface) { struct imon_context *ictx; struct device *dev; int ifnum; ictx = usb_get_intfdata(interface); ictx->disconnected = true; dev = ictx->dev; ifnum = interface->cur_altsetting->desc.bInterfaceNumber; /* * sysfs_remove_group is safe to call even if sysfs_create_group * hasn't been called */ sysfs_remove_group(&interface->dev.kobj, &imon_display_attr_group); sysfs_remove_group(&interface->dev.kobj, &imon_rf_attr_group); usb_set_intfdata(interface, NULL); /* Abort ongoing write */ if (ictx->tx.busy) { usb_kill_urb(ictx->tx_urb); complete(&ictx->tx.finished); } if (ifnum == 0) { ictx->dev_present_intf0 = false; usb_kill_urb(ictx->rx_urb_intf0); input_unregister_device(ictx->idev); rc_unregister_device(ictx->rdev); if (ictx->display_supported) { if (ictx->display_type == IMON_DISPLAY_TYPE_LCD) usb_deregister_dev(interface, &imon_lcd_class); else if (ictx->display_type == IMON_DISPLAY_TYPE_VFD) usb_deregister_dev(interface, &imon_vfd_class); } usb_put_dev(ictx->usbdev_intf0); } else { ictx->dev_present_intf1 = false; usb_kill_urb(ictx->rx_urb_intf1); if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { timer_delete_sync(&ictx->ttimer); input_unregister_device(ictx->touch); } usb_put_dev(ictx->usbdev_intf1); } if (refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n", __func__, ifnum); } static int imon_suspend(struct usb_interface *intf, pm_message_t message) { struct imon_context *ictx = usb_get_intfdata(intf); int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; if (ifnum == 0) usb_kill_urb(ictx->rx_urb_intf0); else usb_kill_urb(ictx->rx_urb_intf1); return 0; } static int imon_resume(struct usb_interface *intf) { int rc = 0; struct imon_context *ictx = usb_get_intfdata(intf); int ifnum = intf->cur_altsetting->desc.bInterfaceNumber; if (ifnum == 0) { usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0, usb_rcvintpipe(ictx->usbdev_intf0, ictx->rx_endpoint_intf0->bEndpointAddress), ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), usb_rx_callback_intf0, ictx, ictx->rx_endpoint_intf0->bInterval); rc = usb_submit_urb(ictx->rx_urb_intf0, GFP_NOIO); } else { usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1, usb_rcvintpipe(ictx->usbdev_intf1, ictx->rx_endpoint_intf1->bEndpointAddress), ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf), usb_rx_callback_intf1, ictx, ictx->rx_endpoint_intf1->bInterval); rc = usb_submit_urb(ictx->rx_urb_intf1, GFP_NOIO); } return rc; } module_usb_driver(imon_driver); |
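/*
 * Illustrative sketch, not part of the imon driver above: the endpoint scan
 * in imon_find_endpoints() open-codes the direction and transfer-type tests.
 * The same "first interrupt-IN endpoint is IR, first interrupt-OUT endpoint
 * is the display" selection can be expressed with the generic
 * usb_endpoint_is_int_in()/usb_endpoint_is_int_out() helpers from
 * <linux/usb.h>.  The function and parameter names below are hypothetical.
 */
#if 0	/* sketch only, not built */
static void example_scan_endpoints(struct usb_host_interface *iface,
				   struct usb_endpoint_descriptor **rx_ep,
				   struct usb_endpoint_descriptor **tx_ep)
{
	int i;

	*rx_ep = NULL;
	*tx_ep = NULL;

	for (i = 0; i < iface->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *ep = &iface->endpoint[i].desc;

		if (!*rx_ep && usb_endpoint_is_int_in(ep))
			*rx_ep = ep;	/* IR data comes in here */
		else if (!*tx_ep && usb_endpoint_is_int_out(ep))
			*tx_ep = ep;	/* display/control data goes out here */
	}
}
#endif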
// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Cypress M8 driver
 *
 * Copyright (C) 2004
 *	Lonnie Mendez (dignome@gmail.com)
 * Copyright (C) 2003,2004
 *	Neil Whelchel (koyama@firstlight.net)
 *
 * See Documentation/usb/usb-serial.rst for more information on using this
 * driver
 *
 * See http://geocities.com/i0xox0i for information on this driver and the
 * earthmate usb device.
 */

/* Thanks to Neil Whelchel for writing the first cypress m8 implementation
   for linux. */
/* Thanks to cypress for providing references for the hid reports. */
/* Thanks to Jiang Zhang for providing links and for general help.
*/ /* Code originates and was built up from ftdi_sio, belkin, pl2303 and others.*/ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include <linux/kfifo.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/unaligned.h> #include "cypress_m8.h" static bool stats; static int interval; static bool unstable_bauds; #define DRIVER_AUTHOR "Lonnie Mendez <dignome@gmail.com>, Neil Whelchel <koyama@firstlight.net>" #define DRIVER_DESC "Cypress USB to Serial Driver" /* write buffer size defines */ #define CYPRESS_BUF_SIZE 1024 static const struct usb_device_id id_table_earthmate[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { } /* Terminating entry */ }; static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ }; static const struct usb_device_id id_table_nokiaca42v2[] = { { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, { } /* Terminating entry */ }; static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table_combined); enum packet_format { packet_format_1, /* b0:status, b1:payload count */ packet_format_2 /* b0[7:3]:status, b0[2:0]:payload count */ }; struct cypress_private { spinlock_t lock; /* private lock */ int chiptype; /* identifier of device, for quirks/etc */ int bytes_in; /* used for statistics */ int bytes_out; /* used for statistics */ int cmd_count; /* used for statistics */ int cmd_ctrl; /* always set this to 1 before issuing a command */ struct kfifo write_fifo; /* write fifo */ int write_urb_in_use; /* write urb in use indicator */ int write_urb_interval; /* interval to use for write urb */ int read_urb_interval; /* interval to use for read urb */ int comm_is_ok; /* true if communication is (still) ok */ __u8 line_control; /* holds dtr / rts value */ __u8 current_status; /* received from last read - info on dsr,cts,cd,ri,etc */ __u8 current_config; /* stores the current configuration byte */ __u8 rx_flags; /* throttling - used from whiteheat/ftdi_sio */ enum packet_format pkt_fmt; /* format to use for packet send / receive */ int get_cfg_unsafe; /* If true, the CYPRESS_GET_CONFIG is unsafe */ int baud_rate; /* stores current baud rate in integer form */ char prev_status; /* used for TIOCMIWAIT */ }; /* function prototypes for the Cypress USB to serial device */ static int cypress_earthmate_port_probe(struct usb_serial_port *port); static int cypress_hidcom_port_probe(struct usb_serial_port *port); static int cypress_ca42v2_port_probe(struct usb_serial_port *port); static void cypress_port_remove(struct 
usb_serial_port *port); static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port); static void cypress_close(struct usb_serial_port *port); static void cypress_dtr_rts(struct usb_serial_port *port, int on); static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); static void cypress_send(struct usb_serial_port *port); static unsigned int cypress_write_room(struct tty_struct *tty); static void cypress_earthmate_init_termios(struct tty_struct *tty); static void cypress_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); static int cypress_tiocmget(struct tty_struct *tty); static int cypress_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static unsigned int cypress_chars_in_buffer(struct tty_struct *tty); static void cypress_throttle(struct tty_struct *tty); static void cypress_unthrottle(struct tty_struct *tty); static void cypress_set_dead(struct usb_serial_port *port); static void cypress_read_int_callback(struct urb *urb); static void cypress_write_int_callback(struct urb *urb); static struct usb_serial_driver cypress_earthmate_device = { .driver = { .name = "earthmate", }, .description = "DeLorme Earthmate USB", .id_table = id_table_earthmate, .num_ports = 1, .port_probe = cypress_earthmate_port_probe, .port_remove = cypress_port_remove, .open = cypress_open, .close = cypress_close, .dtr_rts = cypress_dtr_rts, .write = cypress_write, .write_room = cypress_write_room, .init_termios = cypress_earthmate_init_termios, .set_termios = cypress_set_termios, .tiocmget = cypress_tiocmget, .tiocmset = cypress_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .chars_in_buffer = cypress_chars_in_buffer, .throttle = cypress_throttle, .unthrottle = cypress_unthrottle, .read_int_callback = cypress_read_int_callback, .write_int_callback = cypress_write_int_callback, }; static struct usb_serial_driver cypress_hidcom_device = { .driver = { .name = "cyphidcom", }, .description = "HID->COM RS232 Adapter", .id_table = id_table_cyphidcomrs232, .num_ports = 1, .port_probe = cypress_hidcom_port_probe, .port_remove = cypress_port_remove, .open = cypress_open, .close = cypress_close, .dtr_rts = cypress_dtr_rts, .write = cypress_write, .write_room = cypress_write_room, .set_termios = cypress_set_termios, .tiocmget = cypress_tiocmget, .tiocmset = cypress_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .chars_in_buffer = cypress_chars_in_buffer, .throttle = cypress_throttle, .unthrottle = cypress_unthrottle, .read_int_callback = cypress_read_int_callback, .write_int_callback = cypress_write_int_callback, }; static struct usb_serial_driver cypress_ca42v2_device = { .driver = { .name = "nokiaca42v2", }, .description = "Nokia CA-42 V2 Adapter", .id_table = id_table_nokiaca42v2, .num_ports = 1, .port_probe = cypress_ca42v2_port_probe, .port_remove = cypress_port_remove, .open = cypress_open, .close = cypress_close, .dtr_rts = cypress_dtr_rts, .write = cypress_write, .write_room = cypress_write_room, .set_termios = cypress_set_termios, .tiocmget = cypress_tiocmget, .tiocmset = cypress_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .chars_in_buffer = cypress_chars_in_buffer, .throttle = cypress_throttle, .unthrottle = cypress_unthrottle, .read_int_callback = cypress_read_int_callback, .write_int_callback = cypress_write_int_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &cypress_earthmate_device, &cypress_hidcom_device, 
&cypress_ca42v2_device, NULL }; /***************************************************************************** * Cypress serial helper functions *****************************************************************************/ /* FRWD Dongle hidcom needs to skip reset and speed checks */ static inline bool is_frwd(struct usb_device *dev) { return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) && (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD)); } static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) { struct cypress_private *priv; priv = usb_get_serial_port_data(port); if (unstable_bauds) return new_rate; /* FRWD Dongle uses 115200 bps */ if (is_frwd(port->serial->dev)) return new_rate; /* * The general purpose firmware for the Cypress M8 allows for * a maximum speed of 57600bps (I have no idea whether DeLorme * chose to use the general purpose firmware or not), if you * need to modify this speed setting for your own project * please add your own chiptype and modify the code likewise. * The Cypress HID->COM device will work successfully up to * 115200bps (but the actual throughput is around 3kBps). */ if (port->serial->dev->speed == USB_SPEED_LOW) { /* * Mike Isely <isely@pobox.com> 2-Feb-2008: The * Cypress app note that describes this mechanism * states that the low-speed part can't handle more * than 800 bytes/sec, in which case 4800 baud is the * safest speed for a part like that. */ if (new_rate > 4800) { dev_dbg(&port->dev, "%s - failed setting baud rate, device incapable speed %d\n", __func__, new_rate); return -1; } } switch (priv->chiptype) { case CT_EARTHMATE: if (new_rate <= 600) { /* 300 and 600 baud rates are supported under * the generic firmware, but are not used with * NMEA and SiRF protocols */ dev_dbg(&port->dev, "%s - failed setting baud rate, unsupported speed of %d on Earthmate GPS\n", __func__, new_rate); return -1; } break; default: break; } return new_rate; } /* This function can either set or retrieve the current serial line settings */ static int cypress_serial_control(struct tty_struct *tty, struct usb_serial_port *port, speed_t baud_rate, int data_bits, int stop_bits, int parity_enable, int parity_type, int reset, int cypress_request_type) { int new_baudrate = 0, retval = 0, tries = 0; struct cypress_private *priv; struct device *dev = &port->dev; u8 *feature_buffer; const unsigned int feature_len = 5; unsigned long flags; priv = usb_get_serial_port_data(port); if (!priv->comm_is_ok) return -ENODEV; feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL); if (!feature_buffer) return -ENOMEM; switch (cypress_request_type) { case CYPRESS_SET_CONFIG: /* 0 means 'Hang up' so doesn't change the true bit rate */ new_baudrate = priv->baud_rate; if (baud_rate && baud_rate != priv->baud_rate) { dev_dbg(dev, "%s - baud rate is changing\n", __func__); retval = analyze_baud_rate(port, baud_rate); if (retval >= 0) { new_baudrate = retval; dev_dbg(dev, "%s - New baud rate set to %d\n", __func__, new_baudrate); } } dev_dbg(dev, "%s - baud rate is being sent as %d\n", __func__, new_baudrate); /* fill the feature_buffer with new configuration */ put_unaligned_le32(new_baudrate, feature_buffer); feature_buffer[4] |= data_bits - 5; /* assign data bits in 2 bit space ( max 3 ) */ /* 1 bit gap */ feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */ feature_buffer[4] |= (parity_enable << 4); /* assign parity flag in 1 bit space */ feature_buffer[4] |= (parity_type << 5); /* assign parity type in 1 bit 
space */ /* 1 bit gap */ feature_buffer[4] |= (reset << 7); /* assign reset at end of byte, 1 bit space */ dev_dbg(dev, "%s - device is being sent this feature report:\n", __func__); dev_dbg(dev, "%s - %02X - %02X - %02X - %02X - %02X\n", __func__, feature_buffer[0], feature_buffer[1], feature_buffer[2], feature_buffer[3], feature_buffer[4]); do { retval = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), HID_REQ_SET_REPORT, USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 0x0300, 0, feature_buffer, feature_len, 500); if (tries++ >= 3) break; } while (retval != feature_len && retval != -ENODEV); if (retval != feature_len) { dev_err(dev, "%s - failed sending serial line settings - %d\n", __func__, retval); cypress_set_dead(port); } else { spin_lock_irqsave(&priv->lock, flags); priv->baud_rate = new_baudrate; priv->current_config = feature_buffer[4]; spin_unlock_irqrestore(&priv->lock, flags); /* If we asked for a speed change encode it */ if (baud_rate) tty_encode_baud_rate(tty, new_baudrate, new_baudrate); } break; case CYPRESS_GET_CONFIG: if (priv->get_cfg_unsafe) { /* Not implemented for this device, and if we try to do it we're likely to crash the hardware. */ retval = -ENOTTY; goto out; } dev_dbg(dev, "%s - retrieving serial line settings\n", __func__); do { retval = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), HID_REQ_GET_REPORT, USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 0x0300, 0, feature_buffer, feature_len, 500); if (tries++ >= 3) break; } while (retval != feature_len && retval != -ENODEV); if (retval != feature_len) { dev_err(dev, "%s - failed to retrieve serial line settings - %d\n", __func__, retval); cypress_set_dead(port); goto out; } else { spin_lock_irqsave(&priv->lock, flags); /* store the config in one byte, and later use bit masks to check values */ priv->current_config = feature_buffer[4]; priv->baud_rate = get_unaligned_le32(feature_buffer); spin_unlock_irqrestore(&priv->lock, flags); } } spin_lock_irqsave(&priv->lock, flags); ++priv->cmd_count; spin_unlock_irqrestore(&priv->lock, flags); out: kfree(feature_buffer); return retval; } /* cypress_serial_control */ static void cypress_set_dead(struct usb_serial_port *port) { struct cypress_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); if (!priv->comm_is_ok) { spin_unlock_irqrestore(&priv->lock, flags); return; } priv->comm_is_ok = 0; spin_unlock_irqrestore(&priv->lock, flags); dev_err(&port->dev, "cypress_m8 suspending failing port %d - " "interval might be too short\n", port->port_number); } /***************************************************************************** * Cypress serial driver functions *****************************************************************************/ static int cypress_generic_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct cypress_private *priv; if (!port->interrupt_out_urb || !port->interrupt_in_urb) { dev_err(&port->dev, "required endpoint is missing\n"); return -ENODEV; } priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL); if (!priv) return -ENOMEM; priv->comm_is_ok = !0; spin_lock_init(&priv->lock); if (kfifo_alloc(&priv->write_fifo, CYPRESS_BUF_SIZE, GFP_KERNEL)) { kfree(priv); return -ENOMEM; } /* Skip reset for FRWD device. It is a workaound: device hangs if it receives SET_CONFIGURE in Configured state. 
*/ if (!is_frwd(serial->dev)) usb_reset_configuration(serial->dev); priv->cmd_ctrl = 0; priv->line_control = 0; priv->rx_flags = 0; /* Default packet format setting is determined by packet size. Anything with a size larger then 9 must have a separate count field since the 3 bit count field is otherwise too small. Otherwise we can use the slightly more compact format. This is in accordance with the cypress_m8 serial converter app note. */ if (port->interrupt_out_size > 9) priv->pkt_fmt = packet_format_1; else priv->pkt_fmt = packet_format_2; if (interval > 0) { priv->write_urb_interval = interval; priv->read_urb_interval = interval; dev_dbg(&port->dev, "%s - read & write intervals forced to %d\n", __func__, interval); } else { priv->write_urb_interval = port->interrupt_out_urb->interval; priv->read_urb_interval = port->interrupt_in_urb->interval; dev_dbg(&port->dev, "%s - intervals: read=%d write=%d\n", __func__, priv->read_urb_interval, priv->write_urb_interval); } usb_set_serial_port_data(port, priv); port->port.drain_delay = 256; return 0; } static int cypress_earthmate_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct cypress_private *priv; int ret; ret = cypress_generic_port_probe(port); if (ret) { dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); return ret; } priv = usb_get_serial_port_data(port); priv->chiptype = CT_EARTHMATE; /* All Earthmate devices use the separated-count packet format! Idiotic. */ priv->pkt_fmt = packet_format_1; if (serial->dev->descriptor.idProduct != cpu_to_le16(PRODUCT_ID_EARTHMATEUSB)) { /* The old original USB Earthmate seemed able to handle GET_CONFIG requests; everything they've produced since that time crashes if this command is attempted :-( */ dev_dbg(&port->dev, "%s - Marking this device as unsafe for GET_CONFIG commands\n", __func__); priv->get_cfg_unsafe = !0; } return 0; } static int cypress_hidcom_port_probe(struct usb_serial_port *port) { struct cypress_private *priv; int ret; ret = cypress_generic_port_probe(port); if (ret) { dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); return ret; } priv = usb_get_serial_port_data(port); priv->chiptype = CT_CYPHIDCOM; return 0; } static int cypress_ca42v2_port_probe(struct usb_serial_port *port) { struct cypress_private *priv; int ret; ret = cypress_generic_port_probe(port); if (ret) { dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__); return ret; } priv = usb_get_serial_port_data(port); priv->chiptype = CT_CA42V2; return 0; } static void cypress_port_remove(struct usb_serial_port *port) { struct cypress_private *priv; priv = usb_get_serial_port_data(port); kfifo_free(&priv->write_fifo); kfree(priv); } static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port) { struct cypress_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; unsigned long flags; int result = 0; if (!priv->comm_is_ok) return -EIO; /* clear halts before open */ usb_clear_halt(serial->dev, 0x81); usb_clear_halt(serial->dev, 0x02); spin_lock_irqsave(&priv->lock, flags); /* reset read/write statistics */ priv->bytes_in = 0; priv->bytes_out = 0; priv->cmd_count = 0; priv->rx_flags = 0; spin_unlock_irqrestore(&priv->lock, flags); /* Set termios */ cypress_send(port); if (tty) cypress_set_termios(tty, port, NULL); /* setup the port and start reading from the device */ usb_fill_int_urb(port->interrupt_in_urb, serial->dev, usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress), 
port->interrupt_in_urb->transfer_buffer, port->interrupt_in_urb->transfer_buffer_length, cypress_read_int_callback, port, priv->read_urb_interval); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "%s - failed submitting read urb, error %d\n", __func__, result); cypress_set_dead(port); } return result; } /* cypress_open */ static void cypress_dtr_rts(struct usb_serial_port *port, int on) { struct cypress_private *priv = usb_get_serial_port_data(port); /* drop dtr and rts */ spin_lock_irq(&priv->lock); if (on == 0) priv->line_control = 0; else priv->line_control = CONTROL_DTR | CONTROL_RTS; priv->cmd_ctrl = 1; spin_unlock_irq(&priv->lock); cypress_write(NULL, port, NULL, 0); } static void cypress_close(struct usb_serial_port *port) { struct cypress_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); kfifo_reset_out(&priv->write_fifo); spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(&port->dev, "%s - stopping urbs\n", __func__); usb_kill_urb(port->interrupt_in_urb); usb_kill_urb(port->interrupt_out_urb); if (stats) dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n", priv->bytes_in, priv->bytes_out, priv->cmd_count); } /* cypress_close */ static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct cypress_private *priv = usb_get_serial_port_data(port); dev_dbg(&port->dev, "%s - %d bytes\n", __func__, count); /* line control commands, which need to be executed immediately, are not put into the buffer for obvious reasons. */ if (priv->cmd_ctrl) { count = 0; goto finish; } if (!count) return count; count = kfifo_in_locked(&priv->write_fifo, buf, count, &priv->lock); finish: cypress_send(port); return count; } /* cypress_write */ static void cypress_send(struct usb_serial_port *port) { int count = 0, result, offset, actual_size; struct cypress_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; unsigned long flags; if (!priv->comm_is_ok) return; dev_dbg(dev, "%s - interrupt out size is %d\n", __func__, port->interrupt_out_size); spin_lock_irqsave(&priv->lock, flags); if (priv->write_urb_in_use) { dev_dbg(dev, "%s - can't write, urb in use\n", __func__); spin_unlock_irqrestore(&priv->lock, flags); return; } spin_unlock_irqrestore(&priv->lock, flags); /* clear buffer */ memset(port->interrupt_out_urb->transfer_buffer, 0, port->interrupt_out_size); spin_lock_irqsave(&priv->lock, flags); switch (priv->pkt_fmt) { default: case packet_format_1: /* this is for the CY7C64013... */ offset = 2; port->interrupt_out_buffer[0] = priv->line_control; break; case packet_format_2: /* this is for the CY7C63743... 
*/ offset = 1; port->interrupt_out_buffer[0] = priv->line_control; break; } if (priv->line_control & CONTROL_RESET) priv->line_control &= ~CONTROL_RESET; if (priv->cmd_ctrl) { priv->cmd_count++; dev_dbg(dev, "%s - line control command being issued\n", __func__); spin_unlock_irqrestore(&priv->lock, flags); goto send; } else spin_unlock_irqrestore(&priv->lock, flags); count = kfifo_out_locked(&priv->write_fifo, &port->interrupt_out_buffer[offset], port->interrupt_out_size - offset, &priv->lock); if (count == 0) return; switch (priv->pkt_fmt) { default: case packet_format_1: port->interrupt_out_buffer[1] = count; break; case packet_format_2: port->interrupt_out_buffer[0] |= count; } dev_dbg(dev, "%s - count is %d\n", __func__, count); send: spin_lock_irqsave(&priv->lock, flags); priv->write_urb_in_use = 1; spin_unlock_irqrestore(&priv->lock, flags); if (priv->cmd_ctrl) actual_size = 1; else actual_size = count + (priv->pkt_fmt == packet_format_1 ? 2 : 1); usb_serial_debug_data(dev, __func__, port->interrupt_out_size, port->interrupt_out_urb->transfer_buffer); usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev, usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress), port->interrupt_out_buffer, actual_size, cypress_write_int_callback, port, priv->write_urb_interval); result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC); if (result) { dev_err_console(port, "%s - failed submitting write urb, error %d\n", __func__, result); priv->write_urb_in_use = 0; cypress_set_dead(port); } spin_lock_irqsave(&priv->lock, flags); if (priv->cmd_ctrl) priv->cmd_ctrl = 0; /* do not count the line control and size bytes */ priv->bytes_out += count; spin_unlock_irqrestore(&priv->lock, flags); usb_serial_port_softint(port); } /* cypress_send */ /* returns how much space is available in the soft buffer */ static unsigned int cypress_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); unsigned int room; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); room = kfifo_avail(&priv->write_fifo); spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(&port->dev, "%s - returns %u\n", __func__, room); return room; } static int cypress_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); __u8 status, control; unsigned int result = 0; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control = priv->line_control; status = priv->current_status; spin_unlock_irqrestore(&priv->lock, flags); result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) | ((control & CONTROL_RTS) ? TIOCM_RTS : 0) | ((status & UART_CTS) ? TIOCM_CTS : 0) | ((status & UART_DSR) ? TIOCM_DSR : 0) | ((status & UART_RI) ? TIOCM_RI : 0) | ((status & UART_CD) ? 
TIOCM_CD : 0); dev_dbg(&port->dev, "%s - result = %x\n", __func__, result); return result; } static int cypress_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); if (set & TIOCM_RTS) priv->line_control |= CONTROL_RTS; if (set & TIOCM_DTR) priv->line_control |= CONTROL_DTR; if (clear & TIOCM_RTS) priv->line_control &= ~CONTROL_RTS; if (clear & TIOCM_DTR) priv->line_control &= ~CONTROL_DTR; priv->cmd_ctrl = 1; spin_unlock_irqrestore(&priv->lock, flags); return cypress_write(tty, port, NULL, 0); } static void cypress_earthmate_init_termios(struct tty_struct *tty) { tty_encode_baud_rate(tty, 4800, 4800); } static void cypress_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct cypress_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; int data_bits, stop_bits, parity_type, parity_enable; unsigned int cflag; unsigned long flags; __u8 oldlines; int linechange = 0; /* Unsupported features need clearing */ tty->termios.c_cflag &= ~(CMSPAR|CRTSCTS); cflag = tty->termios.c_cflag; /* set number of data bits, parity, stop bits */ /* when parity is disabled the parity type bit is ignored */ /* 1 means 2 stop bits, 0 means 1 stop bit */ stop_bits = cflag & CSTOPB ? 1 : 0; if (cflag & PARENB) { parity_enable = 1; /* 1 means odd parity, 0 means even parity */ parity_type = cflag & PARODD ? 1 : 0; } else parity_enable = parity_type = 0; data_bits = tty_get_char_size(cflag); spin_lock_irqsave(&priv->lock, flags); oldlines = priv->line_control; if ((cflag & CBAUD) == B0) { /* drop dtr and rts */ dev_dbg(dev, "%s - dropping the lines, baud rate 0bps\n", __func__); priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS); } else priv->line_control = (CONTROL_DTR | CONTROL_RTS); spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(dev, "%s - sending %d stop_bits, %d parity_enable, %d parity_type, %d data_bits (+5)\n", __func__, stop_bits, parity_enable, parity_type, data_bits); cypress_serial_control(tty, port, tty_get_baud_rate(tty), data_bits, stop_bits, parity_enable, parity_type, 0, CYPRESS_SET_CONFIG); /* we perform a CYPRESS_GET_CONFIG so that the current settings are * filled into the private structure this should confirm that all is * working if it returns what we just set */ cypress_serial_control(tty, port, 0, 0, 0, 0, 0, 0, CYPRESS_GET_CONFIG); /* Here we can define custom tty settings for devices; the main tty * termios flag base comes from empeg.c */ spin_lock_irqsave(&priv->lock, flags); if (priv->chiptype == CT_EARTHMATE && priv->baud_rate == 4800) { dev_dbg(dev, "Using custom termios settings for a baud rate of 4800bps.\n"); /* define custom termios settings for NMEA protocol */ tty->termios.c_iflag /* input modes - */ &= ~(IGNBRK /* disable ignore break */ | BRKINT /* disable break causes interrupt */ | PARMRK /* disable mark parity errors */ | ISTRIP /* disable clear high bit of input char */ | INLCR /* disable translate NL to CR */ | IGNCR /* disable ignore CR */ | ICRNL /* disable translate CR to NL */ | IXON); /* disable enable XON/XOFF flow control */ tty->termios.c_oflag /* output modes */ &= ~OPOST; /* disable postprocess output char */ tty->termios.c_lflag /* line discipline modes */ &= ~(ECHO /* disable echo input characters */ | ECHONL /* disable echo new line */ | ICANON /* disable erase, kill, werase, and rprnt 
special characters */ | ISIG /* disable interrupt, quit, and suspend special characters */ | IEXTEN); /* disable non-POSIX special characters */ } /* CT_CYPHIDCOM: Application should handle this for device */ linechange = (priv->line_control != oldlines); spin_unlock_irqrestore(&priv->lock, flags); /* if necessary, set lines */ if (linechange) { priv->cmd_ctrl = 1; cypress_write(tty, port, NULL, 0); } } /* cypress_set_termios */ /* returns amount of data still left in soft buffer */ static unsigned int cypress_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); unsigned int chars; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); chars = kfifo_len(&priv->write_fifo); spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars); return chars; } static void cypress_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); spin_lock_irq(&priv->lock); priv->rx_flags = THROTTLED; spin_unlock_irq(&priv->lock); } static void cypress_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct cypress_private *priv = usb_get_serial_port_data(port); int actually_throttled, result; spin_lock_irq(&priv->lock); actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED; priv->rx_flags = 0; spin_unlock_irq(&priv->lock); if (!priv->comm_is_ok) return; if (actually_throttled) { result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "%s - failed submitting read urb, " "error %d\n", __func__, result); cypress_set_dead(port); } } } static void cypress_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct cypress_private *priv = usb_get_serial_port_data(port); struct device *dev = &urb->dev->dev; struct tty_struct *tty; unsigned char *data = urb->transfer_buffer; unsigned long flags; char tty_flag = TTY_NORMAL; int bytes = 0; int result; int i = 0; int status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* precursor to disconnect so just go away */ return; case -EPIPE: /* Can't call usb_clear_halt while in_interrupt */ fallthrough; default: /* something ugly is going on... */ dev_err(dev, "%s - unexpected nonzero read status received: %d\n", __func__, status); cypress_set_dead(port); return; } spin_lock_irqsave(&priv->lock, flags); if (priv->rx_flags & THROTTLED) { dev_dbg(dev, "%s - now throttling\n", __func__); priv->rx_flags |= ACTUALLY_THROTTLED; spin_unlock_irqrestore(&priv->lock, flags); return; } spin_unlock_irqrestore(&priv->lock, flags); tty = tty_port_tty_get(&port->port); if (!tty) { dev_dbg(dev, "%s - bad tty pointer - exiting\n", __func__); return; } spin_lock_irqsave(&priv->lock, flags); result = urb->actual_length; switch (priv->pkt_fmt) { default: case packet_format_1: /* This is for the CY7C64013... */ priv->current_status = data[0] & 0xF8; bytes = data[1] + 2; i = 2; break; case packet_format_2: /* This is for the CY7C63743... 
*/ priv->current_status = data[0] & 0xF8; bytes = (data[0] & 0x07) + 1; i = 1; break; } spin_unlock_irqrestore(&priv->lock, flags); if (result < bytes) { dev_dbg(dev, "%s - wrong packet size - received %d bytes but packet said %d bytes\n", __func__, result, bytes); goto continue_read; } usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); spin_lock_irqsave(&priv->lock, flags); /* check to see if status has changed */ if (priv->current_status != priv->prev_status) { u8 delta = priv->current_status ^ priv->prev_status; if (delta & UART_MSR_MASK) { if (delta & UART_CTS) port->icount.cts++; if (delta & UART_DSR) port->icount.dsr++; if (delta & UART_RI) port->icount.rng++; if (delta & UART_CD) port->icount.dcd++; wake_up_interruptible(&port->port.delta_msr_wait); } priv->prev_status = priv->current_status; } spin_unlock_irqrestore(&priv->lock, flags); /* hangup, as defined in acm.c... this might be a bad place for it * though */ if (tty && !C_CLOCAL(tty) && !(priv->current_status & UART_CD)) { dev_dbg(dev, "%s - calling hangup\n", __func__); tty_hangup(tty); goto continue_read; } /* There is one error bit... I'm assuming it is a parity error * indicator as the generic firmware will set this bit to 1 if a * parity error occurs. * I can not find reference to any other error events. */ spin_lock_irqsave(&priv->lock, flags); if (priv->current_status & CYP_ERROR) { spin_unlock_irqrestore(&priv->lock, flags); tty_flag = TTY_PARITY; dev_dbg(dev, "%s - Parity Error detected\n", __func__); } else spin_unlock_irqrestore(&priv->lock, flags); /* process read if there is data other than line status */ if (bytes > i) { tty_insert_flip_string_fixed_flag(&port->port, data + i, tty_flag, bytes - i); tty_flip_buffer_push(&port->port); } spin_lock_irqsave(&priv->lock, flags); /* control and status byte(s) are also counted */ priv->bytes_in += bytes; spin_unlock_irqrestore(&priv->lock, flags); continue_read: tty_kref_put(tty); /* Continue trying to always read */ if (priv->comm_is_ok) { usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev, usb_rcvintpipe(port->serial->dev, port->interrupt_in_endpointAddress), port->interrupt_in_urb->transfer_buffer, port->interrupt_in_urb->transfer_buffer_length, cypress_read_int_callback, port, priv->read_urb_interval); result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); if (result && result != -EPERM) { dev_err(dev, "%s - failed resubmitting read urb, error %d\n", __func__, result); cypress_set_dead(port); } } } /* cypress_read_int_callback */ static void cypress_write_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct cypress_private *priv = usb_get_serial_port_data(port); struct device *dev = &urb->dev->dev; int status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status); priv->write_urb_in_use = 0; return; case -EPIPE: /* Cannot call usb_clear_halt while in_interrupt */ fallthrough; default: dev_err(dev, "%s - unexpected nonzero write status received: %d\n", __func__, status); cypress_set_dead(port); break; } priv->write_urb_in_use = 0; /* send any buffered data */ cypress_send(port); } module_usb_serial_driver(serial_drivers, id_table_combined); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(stats, bool, 0644); MODULE_PARM_DESC(stats, "Enable statistics or not"); module_param(interval, int, 
		0644);
MODULE_PARM_DESC(interval, "Overrides interrupt interval");
module_param(unstable_bauds, bool, 0644);
MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
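/*
 * Illustrative sketch, not part of the cypress_m8 driver above:
 * cypress_serial_control() packs the line settings into a 5-byte HID feature
 * report -- bytes 0-3 carry the baud rate (little endian, see
 * put_unaligned_le32()) and byte 4 carries the line-format flags.  The
 * hypothetical helper below restates the byte-4 bit layout used there.
 */
#if 0	/* sketch only, not built */
static u8 example_pack_line_config(int data_bits, int stop_bits,
				   int parity_enable, int parity_type,
				   int reset)
{
	u8 cfg = 0;

	cfg |= data_bits - 5;		/* bits 0-1: data bits, 5..8 -> 0..3 */
					/* bit 2: unused */
	cfg |= stop_bits << 3;		/* bit 3: 1 = two stop bits */
	cfg |= parity_enable << 4;	/* bit 4: parity enabled */
	cfg |= parity_type << 5;	/* bit 5: 1 = odd parity */
					/* bit 6: unused */
	cfg |= reset << 7;		/* bit 7: reset request */

	return cfg;
}
#endif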
// SPDX-License-Identifier: GPL-2.0-or-later
/* Instantiate a public key crypto key from an X.509 Certificate
 *
 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "X.509: "fmt
#include <crypto/hash.h>
#include <keys/asymmetric-parser.h>
#include <keys/asymmetric-subtype.h>
#include <keys/system_keyring.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "asymmetric_keys.h"
#include "x509_parser.h"

/*
 * Set up the signature parameters in an X.509 certificate.  This involves
 * digesting the signed data and extracting the signature.
 */
int x509_get_sig_params(struct x509_certificate *cert)
{
	struct public_key_signature *sig = cert->sig;
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t desc_size;
	int ret;

	pr_devel("==>%s()\n", __func__);

	sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL);
	if (!sig->s)
		return -ENOMEM;

	sig->s_size = cert->raw_sig_size;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(sig->hash_algo, 0, 0);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT) {
			cert->unsupported_sig = true;
			return 0;
		}
		return PTR_ERR(tfm);
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	sig->digest_size = crypto_shash_digestsize(tfm);

	ret = -ENOMEM;
	sig->digest = kmalloc(sig->digest_size, GFP_KERNEL);
	if (!sig->digest)
		goto error;

	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc)
		goto error;

	desc->tfm = tfm;

	ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
	if (ret < 0)
		goto error_2;

	ret = is_hash_blacklisted(sig->digest, sig->digest_size,
				  BLACKLIST_HASH_X509_TBS);
	if (ret == -EKEYREJECTED) {
		pr_err("Cert %*phN is blacklisted\n",
		       sig->digest_size, sig->digest);
		cert->blacklisted = true;
		ret = 0;
	}

error_2:
	kfree(desc);
error:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}

/*
 * Check for self-signedness in an X.509 cert and if found, check the signature
 * immediately if we can.
 */
int x509_check_for_self_signed(struct x509_certificate *cert)
{
	int ret = 0;

	pr_devel("==>%s()\n", __func__);

	if (cert->raw_subject_size != cert->raw_issuer_size ||
	    memcmp(cert->raw_subject, cert->raw_issuer,
		   cert->raw_issuer_size) != 0)
		goto not_self_signed;

	if (cert->sig->auth_ids[0] || cert->sig->auth_ids[1]) {
		/* If the AKID is present it may have one or two parts.  If
		 * both are supplied, both must match.
*/ bool a = asymmetric_key_id_same(cert->skid, cert->sig->auth_ids[1]); bool b = asymmetric_key_id_same(cert->id, cert->sig->auth_ids[0]); if (!a && !b) goto not_self_signed; ret = -EKEYREJECTED; if (((a && !b) || (b && !a)) && cert->sig->auth_ids[0] && cert->sig->auth_ids[1]) goto out; } if (cert->unsupported_sig) { ret = 0; goto out; } ret = public_key_verify_signature(cert->pub, cert->sig); if (ret < 0) { if (ret == -ENOPKG) { cert->unsupported_sig = true; ret = 0; } goto out; } pr_devel("Cert Self-signature verified"); cert->self_signed = true; out: pr_devel("<==%s() = %d\n", __func__, ret); return ret; not_self_signed: pr_devel("<==%s() = 0 [not]\n", __func__); return 0; } /* * Attempt to parse a data blob for a key as an X509 certificate. */ static int x509_key_preparse(struct key_preparsed_payload *prep) { struct x509_certificate *cert __free(x509_free_certificate); struct asymmetric_key_ids *kids __free(kfree) = NULL; char *p, *desc __free(kfree) = NULL; const char *q; size_t srlen, sulen; cert = x509_cert_parse(prep->data, prep->datalen); if (IS_ERR(cert)) return PTR_ERR(cert); pr_devel("Cert Issuer: %s\n", cert->issuer); pr_devel("Cert Subject: %s\n", cert->subject); pr_devel("Cert Key Algo: %s\n", cert->pub->pkey_algo); pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to); cert->pub->id_type = "X509"; if (cert->unsupported_sig) { public_key_signature_free(cert->sig); cert->sig = NULL; } else { pr_devel("Cert Signature: %s + %s\n", cert->sig->pkey_algo, cert->sig->hash_algo); } /* Don't permit addition of blacklisted keys */ if (cert->blacklisted) return -EKEYREJECTED; /* Propose a description */ sulen = strlen(cert->subject); if (cert->raw_skid) { srlen = cert->raw_skid_size; q = cert->raw_skid; } else { srlen = cert->raw_serial_size; q = cert->raw_serial; } desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); if (!desc) return -ENOMEM; p = memcpy(desc, cert->subject, sulen); p += sulen; *p++ = ':'; *p++ = ' '; p = bin2hex(p, q, srlen); *p = 0; kids = kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL); if (!kids) return -ENOMEM; kids->id[0] = cert->id; kids->id[1] = cert->skid; kids->id[2] = asymmetric_key_generate_id(cert->raw_subject, cert->raw_subject_size, "", 0); if (IS_ERR(kids->id[2])) return PTR_ERR(kids->id[2]); /* We're pinning the module by being linked against it */ __module_get(public_key_subtype.owner); prep->payload.data[asym_subtype] = &public_key_subtype; prep->payload.data[asym_key_ids] = kids; prep->payload.data[asym_crypto] = cert->pub; prep->payload.data[asym_auth] = cert->sig; prep->description = desc; prep->quotalen = 100; /* We've finished with the certificate */ cert->pub = NULL; cert->id = NULL; cert->skid = NULL; cert->sig = NULL; desc = NULL; kids = NULL; return 0; } static struct asymmetric_key_parser x509_key_parser = { .owner = THIS_MODULE, .name = "x509", .parse = x509_key_preparse, }; /* * Module stuff */ static int __init x509_key_init(void) { return register_asymmetric_key_parser(&x509_key_parser); } static void __exit x509_key_exit(void) { unregister_asymmetric_key_parser(&x509_key_parser); } module_init(x509_key_init); module_exit(x509_key_exit); MODULE_DESCRIPTION("X.509 certificate parser"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); |
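/*
 * Illustrative sketch, not part of the parser above: x509_key_preparse()
 * proposes a key description of the form "<subject>: <hex id>", using the
 * subject key identifier when present and the raw serial number otherwise.
 * The hypothetical helper below restates that construction; the allocation
 * size mirrors the kmalloc() above (subject + ": " + two hex digits per id
 * byte + NUL).
 */
#if 0	/* sketch only, not built */
static char *example_make_desc(const char *subject, const u8 *id, size_t idlen)
{
	size_t sulen = strlen(subject);
	char *desc, *p;

	desc = kmalloc(sulen + 2 + idlen * 2 + 1, GFP_KERNEL);
	if (!desc)
		return NULL;

	p = memcpy(desc, subject, sulen);
	p += sulen;
	*p++ = ':';
	*p++ = ' ';
	p = bin2hex(p, id, idlen);	/* lower-case hex; returns end of output */
	*p = 0;

	return desc;
}
#endif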
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <net/protocol.h>
#include <net/netfilter/nf_log.h>

#include "nf_internals.h"

/* Internal logging interface, which relies on the real
   LOG target modules */

#define NFLOGGER_NAME_LEN		64

int sysctl_nf_log_all_netns __read_mostly;
EXPORT_SYMBOL(sysctl_nf_log_all_netns);

static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
static DEFINE_MUTEX(nf_log_mutex);

#define nft_log_dereference(logger) \
	rcu_dereference_protected(logger, lockdep_is_held(&nf_log_mutex))

static struct nf_logger *__find_logger(int pf, const char *str_logger)
{
	struct nf_logger *log;
	int i;

	for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
		log = nft_log_dereference(loggers[pf][i]);
		if (!log)
			continue;

		if (!strncasecmp(str_logger, log->name, strlen(log->name)))
			return log;
	}

	return NULL;
}

int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger)
{
	const struct nf_logger *log;

	if (pf == NFPROTO_UNSPEC || pf >=
ARRAY_SIZE(net->nf.nf_loggers)) return -EOPNOTSUPP; mutex_lock(&nf_log_mutex); log = nft_log_dereference(net->nf.nf_loggers[pf]); if (log == NULL) rcu_assign_pointer(net->nf.nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_set); void nf_log_unset(struct net *net, const struct nf_logger *logger) { int i; const struct nf_logger *log; mutex_lock(&nf_log_mutex); for (i = 0; i < NFPROTO_NUMPROTO; i++) { log = nft_log_dereference(net->nf.nf_loggers[i]); if (log == logger) RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL); } mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unset); /* return EEXIST if the same logger is registered, 0 on success. */ int nf_log_register(u_int8_t pf, struct nf_logger *logger) { int i; int ret = 0; if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers)) return -EINVAL; mutex_lock(&nf_log_mutex); if (pf == NFPROTO_UNSPEC) { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { if (rcu_access_pointer(loggers[i][logger->type])) { ret = -EEXIST; goto unlock; } } for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) rcu_assign_pointer(loggers[i][logger->type], logger); } else { if (rcu_access_pointer(loggers[pf][logger->type])) { ret = -EEXIST; goto unlock; } rcu_assign_pointer(loggers[pf][logger->type], logger); } unlock: mutex_unlock(&nf_log_mutex); return ret; } EXPORT_SYMBOL(nf_log_register); void nf_log_unregister(struct nf_logger *logger) { const struct nf_logger *log; int i; mutex_lock(&nf_log_mutex); for (i = 0; i < NFPROTO_NUMPROTO; i++) { log = nft_log_dereference(loggers[i][logger->type]); if (log == logger) RCU_INIT_POINTER(loggers[i][logger->type], NULL); } mutex_unlock(&nf_log_mutex); synchronize_rcu(); } EXPORT_SYMBOL(nf_log_unregister); /** * nf_log_is_registered - Check if any logger is registered for a given * protocol family. * * @pf: Protocol family * * Returns: true if at least one logger is active for @pf, false otherwise. 
*/ bool nf_log_is_registered(u_int8_t pf) { int i; if (pf >= NFPROTO_NUMPROTO) { WARN_ON_ONCE(1); return false; } for (i = 0; i < NF_LOG_TYPE_MAX; i++) { if (rcu_access_pointer(loggers[pf][i])) return true; } return false; } EXPORT_SYMBOL(nf_log_is_registered); int nf_log_bind_pf(struct net *net, u_int8_t pf, const struct nf_logger *logger) { if (pf >= ARRAY_SIZE(net->nf.nf_loggers)) return -EINVAL; mutex_lock(&nf_log_mutex); if (__find_logger(pf, logger->name) == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(net->nf.nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_bind_pf); void nf_log_unbind_pf(struct net *net, u_int8_t pf) { if (pf >= ARRAY_SIZE(net->nf.nf_loggers)) return; mutex_lock(&nf_log_mutex); RCU_INIT_POINTER(net->nf.nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unbind_pf); int nf_logger_find_get(int pf, enum nf_log_type type) { struct nf_logger *logger; int ret = -ENOENT; if (pf >= ARRAY_SIZE(loggers)) return -EINVAL; if (type >= NF_LOG_TYPE_MAX) return -EINVAL; if (pf == NFPROTO_INET) { ret = nf_logger_find_get(NFPROTO_IPV4, type); if (ret < 0) return ret; ret = nf_logger_find_get(NFPROTO_IPV6, type); if (ret < 0) { nf_logger_put(NFPROTO_IPV4, type); return ret; } return 0; } rcu_read_lock(); logger = rcu_dereference(loggers[pf][type]); if (logger == NULL) goto out; if (try_module_get(logger->me)) ret = 0; out: rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(nf_logger_find_get); void nf_logger_put(int pf, enum nf_log_type type) { struct nf_logger *logger; if (pf == NFPROTO_INET) { nf_logger_put(NFPROTO_IPV4, type); nf_logger_put(NFPROTO_IPV6, type); return; } rcu_read_lock(); logger = rcu_dereference(loggers[pf][type]); if (!logger) WARN_ON_ONCE(1); else module_put(logger->me); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_logger_put); void nf_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) { va_list args; char prefix[NF_LOG_PREFIXLEN]; const struct nf_logger *logger; rcu_read_lock(); if (loginfo != NULL) logger = rcu_dereference(loggers[pf][loginfo->type]); else logger = rcu_dereference(net->nf.nf_loggers[pf]); if (logger) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_packet); void nf_log_trace(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) { va_list args; char prefix[NF_LOG_PREFIXLEN]; const struct nf_logger *logger; rcu_read_lock(); logger = rcu_dereference(net->nf.nf_loggers[pf]); if (logger) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_trace); #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) struct nf_log_buf { unsigned int count; char buf[S_SIZE + 1]; }; static struct nf_log_buf emergency, *emergency_ptr = &emergency; __printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...) 
{ va_list args; int len; if (likely(m->count < S_SIZE)) { va_start(args, f); len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args); va_end(args); if (likely(m->count + len < S_SIZE)) { m->count += len; return 0; } } m->count = S_SIZE; printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n"); return -1; } EXPORT_SYMBOL_GPL(nf_log_buf_add); struct nf_log_buf *nf_log_buf_open(void) { struct nf_log_buf *m = kmalloc(sizeof(*m), GFP_ATOMIC); if (unlikely(!m)) { local_bh_disable(); do { m = xchg(&emergency_ptr, NULL); } while (!m); } m->count = 0; return m; } EXPORT_SYMBOL_GPL(nf_log_buf_open); void nf_log_buf_close(struct nf_log_buf *m) { m->buf[m->count] = 0; printk("%s\n", m->buf); if (likely(m != &emergency)) kfree(m); else { emergency_ptr = m; local_bh_enable(); } } EXPORT_SYMBOL_GPL(nf_log_buf_close); #ifdef CONFIG_PROC_FS static void *seq_start(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); mutex_lock(&nf_log_mutex); if (*pos >= ARRAY_SIZE(net->nf.nf_loggers)) return NULL; return pos; } static void *seq_next(struct seq_file *s, void *v, loff_t *pos) { struct net *net = seq_file_net(s); (*pos)++; if (*pos >= ARRAY_SIZE(net->nf.nf_loggers)) return NULL; return pos; } static void seq_stop(struct seq_file *s, void *v) { mutex_unlock(&nf_log_mutex); } static int seq_show(struct seq_file *s, void *v) { loff_t *pos = v; const struct nf_logger *logger; int i; struct net *net = seq_file_net(s); logger = nft_log_dereference(net->nf.nf_loggers[*pos]); if (!logger) seq_printf(s, "%2lld NONE (", *pos); else seq_printf(s, "%2lld %s (", *pos, logger->name); if (seq_has_overflowed(s)) return -ENOSPC; for (i = 0; i < NF_LOG_TYPE_MAX; i++) { if (loggers[*pos][i] == NULL) continue; logger = nft_log_dereference(loggers[*pos][i]); seq_puts(s, logger->name); if (i == 0 && loggers[*pos][i + 1] != NULL) seq_puts(s, ","); if (seq_has_overflowed(s)) return -ENOSPC; } seq_puts(s, ")\n"); if (seq_has_overflowed(s)) return -ENOSPC; return 0; } static const struct seq_operations nflog_seq_ops = { .start = seq_start, .next = seq_next, .stop = seq_stop, .show = seq_show, }; #endif /* PROC_FS */ #ifdef CONFIG_SYSCTL static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO]; static struct ctl_table_header *nf_log_sysctl_fhdr; static struct ctl_table nf_log_sysctl_ftable[] = { { .procname = "nf_log_all_netns", .data = &sysctl_nf_log_all_netns, .maxlen = sizeof(sysctl_nf_log_all_netns), .mode = 0644, .proc_handler = proc_dointvec, }, }; static int nf_log_proc_dostring(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { const struct nf_logger *logger; char buf[NFLOGGER_NAME_LEN]; int r = 0; int tindex = (unsigned long)table->extra1; struct net *net = table->extra2; if (write) { struct ctl_table tmp = *table; /* proc_dostring() can append to existing strings, so we need to * initialize it as an empty string. 
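	 *
	 * This handler backs the per-family files under
	 * /proc/sys/net/netfilter/nf_log/: writing the name of a registered
	 * backend (for example "nf_log_ipv4", if that module is loaded)
	 * binds it for the corresponding protocol family, and writing
	 * "NONE" unbinds it again, as handled below.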
*/ buf[0] = '\0'; tmp.data = buf; r = proc_dostring(&tmp, write, buffer, lenp, ppos); if (r) return r; if (!strcmp(buf, "NONE")) { nf_log_unbind_pf(net, tindex); return 0; } mutex_lock(&nf_log_mutex); logger = __find_logger(tindex, buf); if (logger == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { struct ctl_table tmp = *table; tmp.data = buf; mutex_lock(&nf_log_mutex); logger = nft_log_dereference(net->nf.nf_loggers[tindex]); if (!logger) strscpy(buf, "NONE", sizeof(buf)); else strscpy(buf, logger->name, sizeof(buf)); mutex_unlock(&nf_log_mutex); r = proc_dostring(&tmp, write, buffer, lenp, ppos); } return r; } static int netfilter_log_sysctl_init(struct net *net) { int i; struct ctl_table *table; table = nf_log_sysctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(nf_log_sysctl_table, sizeof(nf_log_sysctl_table), GFP_KERNEL); if (!table) goto err_alloc; } else { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { snprintf(nf_log_sysctl_fnames[i], 3, "%d", i); nf_log_sysctl_table[i].procname = nf_log_sysctl_fnames[i]; nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN; nf_log_sysctl_table[i].mode = 0644; nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring; nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } nf_log_sysctl_fhdr = register_net_sysctl(net, "net/netfilter", nf_log_sysctl_ftable); if (!nf_log_sysctl_fhdr) goto err_freg; } for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) table[i].extra2 = net; net->nf.nf_log_dir_header = register_net_sysctl_sz(net, "net/netfilter/nf_log", table, ARRAY_SIZE(nf_log_sysctl_table)); if (!net->nf.nf_log_dir_header) goto err_reg; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); else unregister_net_sysctl_table(nf_log_sysctl_fhdr); err_freg: err_alloc: return -ENOMEM; } static void netfilter_log_sysctl_exit(struct net *net) { const struct ctl_table *table; table = net->nf.nf_log_dir_header->ctl_table_arg; unregister_net_sysctl_table(net->nf.nf_log_dir_header); if (!net_eq(net, &init_net)) kfree(table); else unregister_net_sysctl_table(nf_log_sysctl_fhdr); } #else static int netfilter_log_sysctl_init(struct net *net) { return 0; } static void netfilter_log_sysctl_exit(struct net *net) { } #endif /* CONFIG_SYSCTL */ static int __net_init nf_log_net_init(struct net *net) { int ret = -ENOMEM; #ifdef CONFIG_PROC_FS if (!proc_create_net("nf_log", 0444, net->nf.proc_netfilter, &nflog_seq_ops, sizeof(struct seq_net_private))) return ret; #endif ret = netfilter_log_sysctl_init(net); if (ret < 0) goto out_sysctl; return 0; out_sysctl: #ifdef CONFIG_PROC_FS remove_proc_entry("nf_log", net->nf.proc_netfilter); #endif return ret; } static void __net_exit nf_log_net_exit(struct net *net) { netfilter_log_sysctl_exit(net); #ifdef CONFIG_PROC_FS remove_proc_entry("nf_log", net->nf.proc_netfilter); #endif } static struct pernet_operations nf_log_net_ops = { .init = nf_log_net_init, .exit = nf_log_net_exit, }; int __init netfilter_log_init(void) { return register_pernet_subsys(&nf_log_net_ops); } |
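The registration, binding and dispatch paths above are what an actual logging backend plugs into. As a rough sketch (not part of the file above; the "demo" names and module boilerplate are made up for illustration), a hypothetical backend would look like this:

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_log.h>

/* Called through logger->logfn() once this backend is bound for @pf;
 * @prefix is the string that nf_log_packet()/nf_log_trace() built. */
static void demo_log_packet(struct net *net, u_int8_t pf,
			    unsigned int hooknum, const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct nf_loginfo *loginfo,
			    const char *prefix)
{
	pr_info("%spf=%u hook=%u len=%u\n", prefix, pf, hooknum, skb->len);
}

static struct nf_logger demo_logger = {
	.name	= "nf_log_demo",
	.type	= NF_LOG_TYPE_LOG,
	.logfn	= demo_log_packet,
	.me	= THIS_MODULE,
};

static int __init demo_init(void)
{
	/* Register for every protocol family; fails with -EEXIST if another
	 * NF_LOG_TYPE_LOG backend already claimed one of the slots. */
	return nf_log_register(NFPROTO_UNSPEC, &demo_logger);
}

static void __exit demo_exit(void)
{
	nf_log_unregister(&demo_logger);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Binding the registered backend to a protocol family still happens separately, either through nf_log_bind_pf()/nf_log_set() or by writing its name to the nf_log sysctl shown earlier.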
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * iSCSI transport class definitions
 *
 * Copyright (C) IBM Corporation, 2004
 * Copyright (C) Mike Christie, 2004 - 2005
 * Copyright (C) Dmitry Yusupov, 2004 - 2005
 * Copyright (C) Alex Aizman, 2004 - 2005
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/bsg-lib.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_bsg_iscsi.h>

#define ISCSI_TRANSPORT_VERSION "2.0-870"

#define ISCSI_SEND_MAX_ALLOWED	10

#define CREATE_TRACE_POINTS
#include
<trace/events/iscsi.h> /* * Export tracepoint symbols to be used by other modules. */ EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_conn); EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_eh); EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_session); EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_tcp); EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_sw_tcp); static int dbg_session; module_param_named(debug_session, dbg_session, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_session, "Turn on debugging for sessions in scsi_transport_iscsi " "module. Set to 1 to turn on, and zero to turn off. Default " "is off."); static int dbg_conn; module_param_named(debug_conn, dbg_conn, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_conn, "Turn on debugging for connections in scsi_transport_iscsi " "module. Set to 1 to turn on, and zero to turn off. Default " "is off."); #define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...) \ do { \ if (dbg_session) \ iscsi_cls_session_printk(KERN_INFO, _session, \ "%s: " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_trans_session, \ &(_session)->dev, \ "%s " dbg_fmt, __func__, ##arg); \ } while (0); #define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...) \ do { \ if (dbg_conn) \ iscsi_cls_conn_printk(KERN_INFO, _conn, \ "%s: " dbg_fmt, \ __func__, ##arg); \ iscsi_dbg_trace(trace_iscsi_dbg_trans_conn, \ &(_conn)->dev, \ "%s " dbg_fmt, __func__, ##arg); \ } while (0); struct iscsi_internal { struct scsi_transport_template t; struct iscsi_transport *iscsi_transport; struct list_head list; struct device dev; struct transport_container conn_cont; struct transport_container session_cont; }; static DEFINE_IDR(iscsi_ep_idr); static DEFINE_MUTEX(iscsi_ep_idr_mutex); static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_conn_cleanup_workq; static DEFINE_IDA(iscsi_sess_ida); /* * list of registered transports and lock that must * be held while accessing list. The iscsi_transport_lock must * be acquired after the rx_queue_mutex. */ static LIST_HEAD(iscsi_transports); static DEFINE_SPINLOCK(iscsi_transport_lock); #define to_iscsi_internal(tmpl) \ container_of(tmpl, struct iscsi_internal, t) #define dev_to_iscsi_internal(_dev) \ container_of(_dev, struct iscsi_internal, dev) static void iscsi_transport_release(struct device *dev) { struct iscsi_internal *priv = dev_to_iscsi_internal(dev); kfree(priv); } /* * iscsi_transport_class represents the iscsi_transports that are * registered. 
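 * Each registered transport is represented by a device in this class;
 * the read-only "handle" and "caps" attributes defined just below are
 * what user space reads back from it over sysfs.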
*/ static struct class iscsi_transport_class = { .name = "iscsi_transport", .dev_release = iscsi_transport_release, }; static ssize_t show_transport_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_internal *priv = dev_to_iscsi_internal(dev); if (!capable(CAP_SYS_ADMIN)) return -EACCES; return sysfs_emit(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport)); } static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); #define show_transport_attr(name, format) \ static ssize_t \ show_transport_##name(struct device *dev, \ struct device_attribute *attr,char *buf) \ { \ struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \ return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\ } \ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); show_transport_attr(caps, "0x%x"); static struct attribute *iscsi_transport_attrs[] = { &dev_attr_handle.attr, &dev_attr_caps.attr, NULL, }; static struct attribute_group iscsi_transport_group = { .attrs = iscsi_transport_attrs, }; /* * iSCSI endpoint attrs */ #define iscsi_dev_to_endpoint(_dev) \ container_of(_dev, struct iscsi_endpoint, dev) #define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) static void iscsi_endpoint_release(struct device *dev) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); mutex_lock(&iscsi_ep_idr_mutex); idr_remove(&iscsi_ep_idr, ep->id); mutex_unlock(&iscsi_ep_idr_mutex); kfree(ep); } static struct class iscsi_endpoint_class = { .name = "iscsi_endpoint", .dev_release = iscsi_endpoint_release, }; static ssize_t show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); return sysfs_emit(buf, "%d\n", ep->id); } static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); static struct attribute *iscsi_endpoint_attrs[] = { &dev_attr_ep_handle.attr, NULL, }; static struct attribute_group iscsi_endpoint_group = { .attrs = iscsi_endpoint_attrs, }; struct iscsi_endpoint * iscsi_create_endpoint(int dd_size) { struct iscsi_endpoint *ep; int err, id; ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL); if (!ep) return NULL; mutex_lock(&iscsi_ep_idr_mutex); /* * First endpoint id should be 1 to comply with user space * applications (iscsid). */ id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO); if (id < 0) { mutex_unlock(&iscsi_ep_idr_mutex); printk(KERN_ERR "Could not allocate endpoint ID. 
Error %d.\n", id); goto free_ep; } mutex_unlock(&iscsi_ep_idr_mutex); ep->id = id; ep->dev.class = &iscsi_endpoint_class; dev_set_name(&ep->dev, "ep-%d", id); err = device_register(&ep->dev); if (err) goto put_dev; err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); if (err) goto unregister_dev; if (dd_size) ep->dd_data = &ep[1]; return ep; unregister_dev: device_unregister(&ep->dev); return NULL; put_dev: mutex_lock(&iscsi_ep_idr_mutex); idr_remove(&iscsi_ep_idr, id); mutex_unlock(&iscsi_ep_idr_mutex); put_device(&ep->dev); return NULL; free_ep: kfree(ep); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_endpoint); void iscsi_destroy_endpoint(struct iscsi_endpoint *ep) { sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group); device_unregister(&ep->dev); } EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint); void iscsi_put_endpoint(struct iscsi_endpoint *ep) { put_device(&ep->dev); } EXPORT_SYMBOL_GPL(iscsi_put_endpoint); /** * iscsi_lookup_endpoint - get ep from handle * @handle: endpoint handle * * Caller must do a iscsi_put_endpoint. */ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) { struct iscsi_endpoint *ep; mutex_lock(&iscsi_ep_idr_mutex); ep = idr_find(&iscsi_ep_idr, handle); if (!ep) goto unlock; get_device(&ep->dev); unlock: mutex_unlock(&iscsi_ep_idr_mutex); return ep; } EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); /* * Interface to display network param to sysfs */ static void iscsi_iface_release(struct device *dev) { struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct device *parent = iface->dev.parent; kfree(iface); put_device(parent); } static struct class iscsi_iface_class = { .name = "iscsi_iface", .dev_release = iscsi_iface_release, }; #define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name, _mode, _show, _store) /* iface attrs show */ #define iscsi_iface_attr_show(type, name, param_type, param) \ static ssize_t \ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct iscsi_iface *iface = iscsi_dev_to_iface(dev); \ struct iscsi_transport *t = iface->transport; \ return t->get_iface_param(iface, param_type, param, buf); \ } \ #define iscsi_iface_net_attr(type, name, param) \ iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \ static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); #define iscsi_iface_attr(type, name, param) \ iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param) \ static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); /* generic read only ipv4 attribute */ iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR); iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW); iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET); iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO); iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en, ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en, ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN); iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN); iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS); iscsi_iface_net_attr(ipv4_iface, grat_arp_en, ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en, ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id, ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID); iscsi_iface_net_attr(ipv4_iface, 
dhcp_req_vendor_id_en, ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en, ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN); iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id, ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID); iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en, ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN); iscsi_iface_net_attr(ipv4_iface, fragment_disable, ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE); iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en, ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN); iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL); /* generic read only ipv6 attribute */ iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR); iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL); iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER); iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg, ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG); iscsi_iface_net_attr(ipv6_iface, link_local_autocfg, ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG); iscsi_iface_net_attr(ipv6_iface, link_local_state, ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE); iscsi_iface_net_attr(ipv6_iface, router_state, ISCSI_NET_PARAM_IPV6_ROUTER_STATE); iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en, ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN); iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN); iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL); iscsi_iface_net_attr(ipv6_iface, traffic_class, ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS); iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT); iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo, ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO); iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time, ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME); iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo, ISCSI_NET_PARAM_IPV6_ND_STALE_TMO); iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt, ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT); iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu, ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU); /* common read only iface attribute */ iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE); iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID); iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY); iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED); iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU); iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT); iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE); iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN); iscsi_iface_net_attr(iface, tcp_nagle_disable, ISCSI_NET_PARAM_TCP_NAGLE_DISABLE); iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE); iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF); iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE); iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN); iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID); iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN); /* common iscsi specific settings attributes */ iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO); iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN); iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN); iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN); iscsi_iface_attr(iface, 
initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN); iscsi_iface_attr(iface, data_seq_in_order, ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN); iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN); iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL); iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH); iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST); iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T); iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST); iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN); iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN); iscsi_iface_attr(iface, discovery_auth_optional, ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL); iscsi_iface_attr(iface, discovery_logout, ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN); iscsi_iface_attr(iface, strict_login_comp_en, ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN); iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME); static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct iscsi_transport *t = iface->transport; int param = -1; if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_iface_header_digest.attr) param = ISCSI_IFACE_PARAM_HDRDGST_EN; else if (attr == &dev_attr_iface_data_digest.attr) param = ISCSI_IFACE_PARAM_DATADGST_EN; else if (attr == &dev_attr_iface_immediate_data.attr) param = ISCSI_IFACE_PARAM_IMM_DATA_EN; else if (attr == &dev_attr_iface_initial_r2t.attr) param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN; else if (attr == &dev_attr_iface_data_seq_in_order.attr) param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN; else if (attr == &dev_attr_iface_data_pdu_in_order.attr) param = ISCSI_IFACE_PARAM_PDU_INORDER_EN; else if (attr == &dev_attr_iface_erl.attr) param = ISCSI_IFACE_PARAM_ERL; else if (attr == &dev_attr_iface_max_recv_dlength.attr) param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH; else if (attr == &dev_attr_iface_first_burst_len.attr) param = ISCSI_IFACE_PARAM_FIRST_BURST; else if (attr == &dev_attr_iface_max_outstanding_r2t.attr) param = ISCSI_IFACE_PARAM_MAX_R2T; else if (attr == &dev_attr_iface_max_burst_len.attr) param = ISCSI_IFACE_PARAM_MAX_BURST; else if (attr == &dev_attr_iface_chap_auth.attr) param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN; else if (attr == &dev_attr_iface_bidi_chap.attr) param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN; else if (attr == &dev_attr_iface_discovery_auth_optional.attr) param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL; else if (attr == &dev_attr_iface_discovery_logout.attr) param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN; else if (attr == &dev_attr_iface_strict_login_comp_en.attr) param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; else if (attr == &dev_attr_iface_initiator_name.attr) param = ISCSI_IFACE_PARAM_INITIATOR_NAME; if (param != -1) return t->attr_is_visible(ISCSI_IFACE_PARAM, param); if (attr == &dev_attr_iface_enabled.attr) param = ISCSI_NET_PARAM_IFACE_ENABLE; else if (attr == &dev_attr_iface_vlan_id.attr) param = ISCSI_NET_PARAM_VLAN_ID; else if (attr == &dev_attr_iface_vlan_priority.attr) param = ISCSI_NET_PARAM_VLAN_PRIORITY; else if (attr == &dev_attr_iface_vlan_enabled.attr) param = ISCSI_NET_PARAM_VLAN_ENABLED; else if (attr == &dev_attr_iface_mtu.attr) param = ISCSI_NET_PARAM_MTU; else if (attr == &dev_attr_iface_port.attr) param = 
ISCSI_NET_PARAM_PORT; else if (attr == &dev_attr_iface_ipaddress_state.attr) param = ISCSI_NET_PARAM_IPADDR_STATE; else if (attr == &dev_attr_iface_delayed_ack_en.attr) param = ISCSI_NET_PARAM_DELAYED_ACK_EN; else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; else if (attr == &dev_attr_iface_tcp_wsf.attr) param = ISCSI_NET_PARAM_TCP_WSF; else if (attr == &dev_attr_iface_tcp_timer_scale.attr) param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; else if (attr == &dev_attr_iface_cache_id.attr) param = ISCSI_NET_PARAM_CACHE_ID; else if (attr == &dev_attr_iface_redirect_en.attr) param = ISCSI_NET_PARAM_REDIRECT_EN; else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { if (attr == &dev_attr_ipv4_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV4_ADDR; else if (attr == &dev_attr_ipv4_iface_gateway.attr) param = ISCSI_NET_PARAM_IPV4_GW; else if (attr == &dev_attr_ipv4_iface_subnet.attr) param = ISCSI_NET_PARAM_IPV4_SUBNET; else if (attr == &dev_attr_ipv4_iface_bootproto.attr) param = ISCSI_NET_PARAM_IPV4_BOOTPROTO; else if (attr == &dev_attr_ipv4_iface_dhcp_dns_address_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN; else if (attr == &dev_attr_ipv4_iface_tos_en.attr) param = ISCSI_NET_PARAM_IPV4_TOS_EN; else if (attr == &dev_attr_ipv4_iface_tos.attr) param = ISCSI_NET_PARAM_IPV4_TOS; else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr) param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID; else if (attr == &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN; else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID; else if (attr == &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr) param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN; else if (attr == &dev_attr_ipv4_iface_fragment_disable.attr) param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE; else if (attr == &dev_attr_ipv4_iface_incoming_forwarding_en.attr) param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN; else if (attr == &dev_attr_ipv4_iface_ttl.attr) param = ISCSI_NET_PARAM_IPV4_TTL; else return 0; } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) { if (attr == &dev_attr_ipv6_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV6_ADDR; else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL; else if (attr == &dev_attr_ipv6_iface_router_addr.attr) param = ISCSI_NET_PARAM_IPV6_ROUTER; else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr) param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG; else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG; else if (attr == &dev_attr_ipv6_iface_link_local_state.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE; else if (attr == &dev_attr_ipv6_iface_router_state.attr) param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE; else if (attr == 
&dev_attr_ipv6_iface_grat_neighbor_adv_en.attr) param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN; else if (attr == &dev_attr_ipv6_iface_mld_en.attr) param = ISCSI_NET_PARAM_IPV6_MLD_EN; else if (attr == &dev_attr_ipv6_iface_flow_label.attr) param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL; else if (attr == &dev_attr_ipv6_iface_traffic_class.attr) param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS; else if (attr == &dev_attr_ipv6_iface_hop_limit.attr) param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT; else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr) param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO; else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr) param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME; else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr) param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO; else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr) param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT; else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr) param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU; else return 0; } else { WARN_ONCE(1, "Invalid iface attr"); return 0; } return t->attr_is_visible(ISCSI_NET_PARAM, param); } static struct attribute *iscsi_iface_attrs[] = { &dev_attr_iface_enabled.attr, &dev_attr_iface_vlan_id.attr, &dev_attr_iface_vlan_priority.attr, &dev_attr_iface_vlan_enabled.attr, &dev_attr_ipv4_iface_ipaddress.attr, &dev_attr_ipv4_iface_gateway.attr, &dev_attr_ipv4_iface_subnet.attr, &dev_attr_ipv4_iface_bootproto.attr, &dev_attr_ipv6_iface_ipaddress.attr, &dev_attr_ipv6_iface_link_local_addr.attr, &dev_attr_ipv6_iface_router_addr.attr, &dev_attr_ipv6_iface_ipaddr_autocfg.attr, &dev_attr_ipv6_iface_link_local_autocfg.attr, &dev_attr_iface_mtu.attr, &dev_attr_iface_port.attr, &dev_attr_iface_ipaddress_state.attr, &dev_attr_iface_delayed_ack_en.attr, &dev_attr_iface_tcp_nagle_disable.attr, &dev_attr_iface_tcp_wsf_disable.attr, &dev_attr_iface_tcp_wsf.attr, &dev_attr_iface_tcp_timer_scale.attr, &dev_attr_iface_tcp_timestamp_en.attr, &dev_attr_iface_cache_id.attr, &dev_attr_iface_redirect_en.attr, &dev_attr_iface_def_taskmgmt_tmo.attr, &dev_attr_iface_header_digest.attr, &dev_attr_iface_data_digest.attr, &dev_attr_iface_immediate_data.attr, &dev_attr_iface_initial_r2t.attr, &dev_attr_iface_data_seq_in_order.attr, &dev_attr_iface_data_pdu_in_order.attr, &dev_attr_iface_erl.attr, &dev_attr_iface_max_recv_dlength.attr, &dev_attr_iface_first_burst_len.attr, &dev_attr_iface_max_outstanding_r2t.attr, &dev_attr_iface_max_burst_len.attr, &dev_attr_iface_chap_auth.attr, &dev_attr_iface_bidi_chap.attr, &dev_attr_iface_discovery_auth_optional.attr, &dev_attr_iface_discovery_logout.attr, &dev_attr_iface_strict_login_comp_en.attr, &dev_attr_iface_initiator_name.attr, &dev_attr_ipv4_iface_dhcp_dns_address_en.attr, &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr, &dev_attr_ipv4_iface_tos_en.attr, &dev_attr_ipv4_iface_tos.attr, &dev_attr_ipv4_iface_grat_arp_en.attr, &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr, &dev_attr_ipv4_iface_dhcp_alt_client_id.attr, &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr, &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr, &dev_attr_ipv4_iface_dhcp_vendor_id.attr, &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr, &dev_attr_ipv4_iface_fragment_disable.attr, &dev_attr_ipv4_iface_incoming_forwarding_en.attr, &dev_attr_ipv4_iface_ttl.attr, &dev_attr_ipv6_iface_link_local_state.attr, &dev_attr_ipv6_iface_router_state.attr, &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr, &dev_attr_ipv6_iface_mld_en.attr, &dev_attr_ipv6_iface_flow_label.attr, 
&dev_attr_ipv6_iface_traffic_class.attr, &dev_attr_ipv6_iface_hop_limit.attr, &dev_attr_ipv6_iface_nd_reachable_tmo.attr, &dev_attr_ipv6_iface_nd_rexmit_time.attr, &dev_attr_ipv6_iface_nd_stale_tmo.attr, &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr, &dev_attr_ipv6_iface_router_adv_link_mtu.attr, NULL, }; static struct attribute_group iscsi_iface_group = { .attrs = iscsi_iface_attrs, .is_visible = iscsi_iface_attr_is_visible, }; /* convert iscsi_ipaddress_state values to ascii string name */ static const struct { enum iscsi_ipaddress_state value; char *name; } iscsi_ipaddress_state_names[] = { {ISCSI_IPDDRESS_STATE_UNCONFIGURED, "Unconfigured" }, {ISCSI_IPDDRESS_STATE_ACQUIRING, "Acquiring" }, {ISCSI_IPDDRESS_STATE_TENTATIVE, "Tentative" }, {ISCSI_IPDDRESS_STATE_VALID, "Valid" }, {ISCSI_IPDDRESS_STATE_DISABLING, "Disabling" }, {ISCSI_IPDDRESS_STATE_INVALID, "Invalid" }, {ISCSI_IPDDRESS_STATE_DEPRECATED, "Deprecated" }, }; char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state) { int i; char *state = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) { if (iscsi_ipaddress_state_names[i].value == port_state) { state = iscsi_ipaddress_state_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name); /* convert iscsi_router_state values to ascii string name */ static const struct { enum iscsi_router_state value; char *name; } iscsi_router_state_names[] = { {ISCSI_ROUTER_STATE_UNKNOWN, "Unknown" }, {ISCSI_ROUTER_STATE_ADVERTISED, "Advertised" }, {ISCSI_ROUTER_STATE_MANUAL, "Manual" }, {ISCSI_ROUTER_STATE_STALE, "Stale" }, }; char *iscsi_get_router_state_name(enum iscsi_router_state router_state) { int i; char *state = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) { if (iscsi_router_state_names[i].value == router_state) { state = iscsi_router_state_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_router_state_name); struct iscsi_iface * iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport, uint32_t iface_type, uint32_t iface_num, int dd_size) { struct iscsi_iface *iface; int err; iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL); if (!iface) return NULL; iface->transport = transport; iface->iface_type = iface_type; iface->iface_num = iface_num; iface->dev.release = iscsi_iface_release; iface->dev.class = &iscsi_iface_class; /* parent reference released in iscsi_iface_release */ iface->dev.parent = get_device(&shost->shost_gendev); if (iface_type == ISCSI_IFACE_TYPE_IPV4) dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no, iface_num); else dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no, iface_num); err = device_register(&iface->dev); if (err) goto put_dev; err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group); if (err) goto unreg_iface; if (dd_size) iface->dd_data = &iface[1]; return iface; unreg_iface: device_unregister(&iface->dev); return NULL; put_dev: put_device(&iface->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_iface); void iscsi_destroy_iface(struct iscsi_iface *iface) { sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group); device_unregister(&iface->dev); } EXPORT_SYMBOL_GPL(iscsi_destroy_iface); /* * Interface to display flash node params to sysfs */ #define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name, _mode, _show, _store) /* flash node session attrs show */ #define iscsi_flashnode_sess_attr_show(type, name, param) \ 
static ssize_t \ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct iscsi_bus_flash_session *fnode_sess = \ iscsi_dev_to_flash_session(dev);\ struct iscsi_transport *t = fnode_sess->transport; \ return t->get_flashnode_param(fnode_sess, param, buf); \ } \ #define iscsi_flashnode_sess_attr(type, name, param) \ iscsi_flashnode_sess_attr_show(type, name, param) \ static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ show_##type##_##name, NULL); /* Flash node session attributes */ iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable, ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE); iscsi_flashnode_sess_attr(fnode, discovery_session, ISCSI_FLASHNODE_DISCOVERY_SESS); iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE); iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN); iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN); iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN); iscsi_flashnode_sess_attr(fnode, data_seq_in_order, ISCSI_FLASHNODE_DATASEQ_INORDER); iscsi_flashnode_sess_attr(fnode, data_pdu_in_order, ISCSI_FLASHNODE_PDU_INORDER); iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN); iscsi_flashnode_sess_attr(fnode, discovery_logout, ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN); iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN); iscsi_flashnode_sess_attr(fnode, discovery_auth_optional, ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL); iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL); iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST); iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT); iscsi_flashnode_sess_attr(fnode, def_time2retain, ISCSI_FLASHNODE_DEF_TIME2RETAIN); iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T); iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID); iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID); iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST); iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo, ISCSI_FLASHNODE_DEF_TASKMGMT_TMO); iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS); iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME); iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT); iscsi_flashnode_sess_attr(fnode, discovery_parent_idx, ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX); iscsi_flashnode_sess_attr(fnode, discovery_parent_type, ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE); iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX); iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX); iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME); iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN); iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD); iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN); iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT); static struct attribute *iscsi_flashnode_sess_attrs[] = { &dev_attr_fnode_auto_snd_tgt_disable.attr, &dev_attr_fnode_discovery_session.attr, &dev_attr_fnode_portal_type.attr, &dev_attr_fnode_entry_enable.attr, &dev_attr_fnode_immediate_data.attr, &dev_attr_fnode_initial_r2t.attr, &dev_attr_fnode_data_seq_in_order.attr, &dev_attr_fnode_data_pdu_in_order.attr, &dev_attr_fnode_chap_auth.attr, &dev_attr_fnode_discovery_logout.attr, 
&dev_attr_fnode_bidi_chap.attr, &dev_attr_fnode_discovery_auth_optional.attr, &dev_attr_fnode_erl.attr, &dev_attr_fnode_first_burst_len.attr, &dev_attr_fnode_def_time2wait.attr, &dev_attr_fnode_def_time2retain.attr, &dev_attr_fnode_max_outstanding_r2t.attr, &dev_attr_fnode_isid.attr, &dev_attr_fnode_tsid.attr, &dev_attr_fnode_max_burst_len.attr, &dev_attr_fnode_def_taskmgmt_tmo.attr, &dev_attr_fnode_targetalias.attr, &dev_attr_fnode_targetname.attr, &dev_attr_fnode_tpgt.attr, &dev_attr_fnode_discovery_parent_idx.attr, &dev_attr_fnode_discovery_parent_type.attr, &dev_attr_fnode_chap_in_idx.attr, &dev_attr_fnode_chap_out_idx.attr, &dev_attr_fnode_username.attr, &dev_attr_fnode_username_in.attr, &dev_attr_fnode_password.attr, &dev_attr_fnode_password_in.attr, &dev_attr_fnode_is_boot_target.attr, NULL, }; static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_bus_flash_session *fnode_sess = iscsi_dev_to_flash_session(dev); struct iscsi_transport *t = fnode_sess->transport; int param; if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) { param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE; } else if (attr == &dev_attr_fnode_discovery_session.attr) { param = ISCSI_FLASHNODE_DISCOVERY_SESS; } else if (attr == &dev_attr_fnode_portal_type.attr) { param = ISCSI_FLASHNODE_PORTAL_TYPE; } else if (attr == &dev_attr_fnode_entry_enable.attr) { param = ISCSI_FLASHNODE_ENTRY_EN; } else if (attr == &dev_attr_fnode_immediate_data.attr) { param = ISCSI_FLASHNODE_IMM_DATA_EN; } else if (attr == &dev_attr_fnode_initial_r2t.attr) { param = ISCSI_FLASHNODE_INITIAL_R2T_EN; } else if (attr == &dev_attr_fnode_data_seq_in_order.attr) { param = ISCSI_FLASHNODE_DATASEQ_INORDER; } else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) { param = ISCSI_FLASHNODE_PDU_INORDER; } else if (attr == &dev_attr_fnode_chap_auth.attr) { param = ISCSI_FLASHNODE_CHAP_AUTH_EN; } else if (attr == &dev_attr_fnode_discovery_logout.attr) { param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN; } else if (attr == &dev_attr_fnode_bidi_chap.attr) { param = ISCSI_FLASHNODE_BIDI_CHAP_EN; } else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) { param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL; } else if (attr == &dev_attr_fnode_erl.attr) { param = ISCSI_FLASHNODE_ERL; } else if (attr == &dev_attr_fnode_first_burst_len.attr) { param = ISCSI_FLASHNODE_FIRST_BURST; } else if (attr == &dev_attr_fnode_def_time2wait.attr) { param = ISCSI_FLASHNODE_DEF_TIME2WAIT; } else if (attr == &dev_attr_fnode_def_time2retain.attr) { param = ISCSI_FLASHNODE_DEF_TIME2RETAIN; } else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) { param = ISCSI_FLASHNODE_MAX_R2T; } else if (attr == &dev_attr_fnode_isid.attr) { param = ISCSI_FLASHNODE_ISID; } else if (attr == &dev_attr_fnode_tsid.attr) { param = ISCSI_FLASHNODE_TSID; } else if (attr == &dev_attr_fnode_max_burst_len.attr) { param = ISCSI_FLASHNODE_MAX_BURST; } else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) { param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO; } else if (attr == &dev_attr_fnode_targetalias.attr) { param = ISCSI_FLASHNODE_ALIAS; } else if (attr == &dev_attr_fnode_targetname.attr) { param = ISCSI_FLASHNODE_NAME; } else if (attr == &dev_attr_fnode_tpgt.attr) { param = ISCSI_FLASHNODE_TPGT; } else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) { param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX; } else if (attr == &dev_attr_fnode_discovery_parent_type.attr) { param = 
ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE; } else if (attr == &dev_attr_fnode_chap_in_idx.attr) { param = ISCSI_FLASHNODE_CHAP_IN_IDX; } else if (attr == &dev_attr_fnode_chap_out_idx.attr) { param = ISCSI_FLASHNODE_CHAP_OUT_IDX; } else if (attr == &dev_attr_fnode_username.attr) { param = ISCSI_FLASHNODE_USERNAME; } else if (attr == &dev_attr_fnode_username_in.attr) { param = ISCSI_FLASHNODE_USERNAME_IN; } else if (attr == &dev_attr_fnode_password.attr) { param = ISCSI_FLASHNODE_PASSWORD; } else if (attr == &dev_attr_fnode_password_in.attr) { param = ISCSI_FLASHNODE_PASSWORD_IN; } else if (attr == &dev_attr_fnode_is_boot_target.attr) { param = ISCSI_FLASHNODE_IS_BOOT_TGT; } else { WARN_ONCE(1, "Invalid flashnode session attr"); return 0; } return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); } static struct attribute_group iscsi_flashnode_sess_attr_group = { .attrs = iscsi_flashnode_sess_attrs, .is_visible = iscsi_flashnode_sess_attr_is_visible, }; static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = { &iscsi_flashnode_sess_attr_group, NULL, }; static void iscsi_flashnode_sess_release(struct device *dev) { struct iscsi_bus_flash_session *fnode_sess = iscsi_dev_to_flash_session(dev); kfree(fnode_sess->targetname); kfree(fnode_sess->targetalias); kfree(fnode_sess->portal_type); kfree(fnode_sess); } static const struct device_type iscsi_flashnode_sess_dev_type = { .name = "iscsi_flashnode_sess_dev_type", .groups = iscsi_flashnode_sess_attr_groups, .release = iscsi_flashnode_sess_release, }; /* flash node connection attrs show */ #define iscsi_flashnode_conn_attr_show(type, name, param) \ static ssize_t \ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\ struct iscsi_bus_flash_session *fnode_sess = \ iscsi_flash_conn_to_flash_session(fnode_conn);\ struct iscsi_transport *t = fnode_conn->transport; \ return t->get_flashnode_param(fnode_sess, param, buf); \ } \ #define iscsi_flashnode_conn_attr(type, name, param) \ iscsi_flashnode_conn_attr_show(type, name, param) \ static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ show_##type##_##name, NULL); /* Flash node connection attributes */ iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6, ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6); iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN); iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN); iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN); iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat, ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT); iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable, ISCSI_FLASHNODE_TCP_NAGLE_DISABLE); iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable, ISCSI_FLASHNODE_TCP_WSF_DISABLE); iscsi_flashnode_conn_attr(fnode, tcp_timer_scale, ISCSI_FLASHNODE_TCP_TIMER_SCALE); iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable, ISCSI_FLASHNODE_TCP_TIMESTAMP_EN); iscsi_flashnode_conn_attr(fnode, fragment_disable, ISCSI_FLASHNODE_IP_FRAG_DISABLE); iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO); iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT); iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR); iscsi_flashnode_conn_attr(fnode, max_recv_dlength, ISCSI_FLASHNODE_MAX_RECV_DLENGTH); iscsi_flashnode_conn_attr(fnode, max_xmit_dlength, ISCSI_FLASHNODE_MAX_XMIT_DLENGTH); iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT); 
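/*
 * For reference: each iscsi_flashnode_conn_attr() invocation in this run
 * generates a read-only sysfs attribute plus its show routine. Modulo
 * whitespace, iscsi_flashnode_conn_attr(fnode, local_port,
 * ISCSI_FLASHNODE_LOCAL_PORT) above expands to roughly:
 *
 *	static ssize_t show_fnode_local_port(struct device *dev,
 *					     struct device_attribute *attr,
 *					     char *buf)
 *	{
 *		struct iscsi_bus_flash_conn *fnode_conn =
 *					iscsi_dev_to_flash_conn(dev);
 *		struct iscsi_bus_flash_session *fnode_sess =
 *				iscsi_flash_conn_to_flash_session(fnode_conn);
 *		struct iscsi_transport *t = fnode_conn->transport;
 *
 *		return t->get_flashnode_param(fnode_sess,
 *					      ISCSI_FLASHNODE_LOCAL_PORT, buf);
 *	}
 *	static struct device_attribute dev_attr_fnode_local_port =
 *		__ATTR(local_port, S_IRUGO, show_fnode_local_port, NULL);
 *
 * i.e. every read goes straight to the transport's ->get_flashnode_param()
 * callback with the corresponding ISCSI_FLASHNODE_* parameter.
 */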
iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS); iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC); iscsi_flashnode_conn_attr(fnode, ipv6_flow_label, ISCSI_FLASHNODE_IPV6_FLOW_LABEL); iscsi_flashnode_conn_attr(fnode, redirect_ipaddr, ISCSI_FLASHNODE_REDIRECT_IPADDR); iscsi_flashnode_conn_attr(fnode, max_segment_size, ISCSI_FLASHNODE_MAX_SEGMENT_SIZE); iscsi_flashnode_conn_attr(fnode, link_local_ipv6, ISCSI_FLASHNODE_LINK_LOCAL_IPV6); iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF); iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF); iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN); iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN); static struct attribute *iscsi_flashnode_conn_attrs[] = { &dev_attr_fnode_is_fw_assigned_ipv6.attr, &dev_attr_fnode_header_digest.attr, &dev_attr_fnode_data_digest.attr, &dev_attr_fnode_snack_req.attr, &dev_attr_fnode_tcp_timestamp_stat.attr, &dev_attr_fnode_tcp_nagle_disable.attr, &dev_attr_fnode_tcp_wsf_disable.attr, &dev_attr_fnode_tcp_timer_scale.attr, &dev_attr_fnode_tcp_timestamp_enable.attr, &dev_attr_fnode_fragment_disable.attr, &dev_attr_fnode_max_recv_dlength.attr, &dev_attr_fnode_max_xmit_dlength.attr, &dev_attr_fnode_keepalive_tmo.attr, &dev_attr_fnode_port.attr, &dev_attr_fnode_ipaddress.attr, &dev_attr_fnode_redirect_ipaddr.attr, &dev_attr_fnode_max_segment_size.attr, &dev_attr_fnode_local_port.attr, &dev_attr_fnode_ipv4_tos.attr, &dev_attr_fnode_ipv6_traffic_class.attr, &dev_attr_fnode_ipv6_flow_label.attr, &dev_attr_fnode_link_local_ipv6.attr, &dev_attr_fnode_tcp_xmit_wsf.attr, &dev_attr_fnode_tcp_recv_wsf.attr, &dev_attr_fnode_statsn.attr, &dev_attr_fnode_exp_statsn.attr, NULL, }; static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); struct iscsi_transport *t = fnode_conn->transport; int param; if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) { param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6; } else if (attr == &dev_attr_fnode_header_digest.attr) { param = ISCSI_FLASHNODE_HDR_DGST_EN; } else if (attr == &dev_attr_fnode_data_digest.attr) { param = ISCSI_FLASHNODE_DATA_DGST_EN; } else if (attr == &dev_attr_fnode_snack_req.attr) { param = ISCSI_FLASHNODE_SNACK_REQ_EN; } else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) { param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT; } else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) { param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE; } else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) { param = ISCSI_FLASHNODE_TCP_WSF_DISABLE; } else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) { param = ISCSI_FLASHNODE_TCP_TIMER_SCALE; } else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) { param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN; } else if (attr == &dev_attr_fnode_fragment_disable.attr) { param = ISCSI_FLASHNODE_IP_FRAG_DISABLE; } else if (attr == &dev_attr_fnode_max_recv_dlength.attr) { param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH; } else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) { param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH; } else if (attr == &dev_attr_fnode_keepalive_tmo.attr) { param = ISCSI_FLASHNODE_KEEPALIVE_TMO; } else if (attr == &dev_attr_fnode_port.attr) { param = ISCSI_FLASHNODE_PORT; } else if (attr == &dev_attr_fnode_ipaddress.attr) { param = 
ISCSI_FLASHNODE_IPADDR; } else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) { param = ISCSI_FLASHNODE_REDIRECT_IPADDR; } else if (attr == &dev_attr_fnode_max_segment_size.attr) { param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE; } else if (attr == &dev_attr_fnode_local_port.attr) { param = ISCSI_FLASHNODE_LOCAL_PORT; } else if (attr == &dev_attr_fnode_ipv4_tos.attr) { param = ISCSI_FLASHNODE_IPV4_TOS; } else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) { param = ISCSI_FLASHNODE_IPV6_TC; } else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) { param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL; } else if (attr == &dev_attr_fnode_link_local_ipv6.attr) { param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6; } else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) { param = ISCSI_FLASHNODE_TCP_XMIT_WSF; } else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) { param = ISCSI_FLASHNODE_TCP_RECV_WSF; } else if (attr == &dev_attr_fnode_statsn.attr) { param = ISCSI_FLASHNODE_STATSN; } else if (attr == &dev_attr_fnode_exp_statsn.attr) { param = ISCSI_FLASHNODE_EXP_STATSN; } else { WARN_ONCE(1, "Invalid flashnode connection attr"); return 0; } return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); } static struct attribute_group iscsi_flashnode_conn_attr_group = { .attrs = iscsi_flashnode_conn_attrs, .is_visible = iscsi_flashnode_conn_attr_is_visible, }; static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = { &iscsi_flashnode_conn_attr_group, NULL, }; static void iscsi_flashnode_conn_release(struct device *dev) { struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); kfree(fnode_conn->ipaddress); kfree(fnode_conn->redirect_ipaddr); kfree(fnode_conn->link_local_ipv6_addr); kfree(fnode_conn); } static const struct device_type iscsi_flashnode_conn_dev_type = { .name = "iscsi_flashnode_conn_dev_type", .groups = iscsi_flashnode_conn_attr_groups, .release = iscsi_flashnode_conn_release, }; static const struct bus_type iscsi_flashnode_bus; int iscsi_flashnode_bus_match(struct device *dev, const struct device_driver *drv) { if (dev->bus == &iscsi_flashnode_bus) return 1; return 0; } EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); static const struct bus_type iscsi_flashnode_bus = { .name = "iscsi_flashnode", .match = &iscsi_flashnode_bus_match, }; /** * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs * @shost: pointer to host data * @index: index of flashnode to add in sysfs * @transport: pointer to transport data * @dd_size: total size to allocate * * Adds a sysfs entry for the flashnode session attributes * * Returns: * pointer to allocated flashnode sess on success * %NULL on failure */ struct iscsi_bus_flash_session * iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index, struct iscsi_transport *transport, int dd_size) { struct iscsi_bus_flash_session *fnode_sess; int err; fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL); if (!fnode_sess) return NULL; fnode_sess->transport = transport; fnode_sess->target_id = index; fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type; fnode_sess->dev.bus = &iscsi_flashnode_bus; fnode_sess->dev.parent = &shost->shost_gendev; dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u", shost->host_no, index); err = device_register(&fnode_sess->dev); if (err) goto put_dev; if (dd_size) fnode_sess->dd_data = &fnode_sess[1]; return fnode_sess; put_dev: put_device(&fnode_sess->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess); /** * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs * @shost: 
pointer to host data * @fnode_sess: pointer to the parent flashnode session entry * @transport: pointer to transport data * @dd_size: total size to allocate * * Adds a sysfs entry for the flashnode connection attributes * * Returns: * pointer to allocated flashnode conn on success * %NULL on failure */ struct iscsi_bus_flash_conn * iscsi_create_flashnode_conn(struct Scsi_Host *shost, struct iscsi_bus_flash_session *fnode_sess, struct iscsi_transport *transport, int dd_size) { struct iscsi_bus_flash_conn *fnode_conn; int err; fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL); if (!fnode_conn) return NULL; fnode_conn->transport = transport; fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type; fnode_conn->dev.bus = &iscsi_flashnode_bus; fnode_conn->dev.parent = &fnode_sess->dev; dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0", shost->host_no, fnode_sess->target_id); err = device_register(&fnode_conn->dev); if (err) goto put_dev; if (dd_size) fnode_conn->dd_data = &fnode_conn[1]; return fnode_conn; put_dev: put_device(&fnode_conn->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); /** * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn * @dev: device to verify * @data: pointer to data containing value to use for verification * * Verifies if the passed device is flashnode conn device * * Returns: * 1 on success * 0 on failure */ static int iscsi_is_flashnode_conn_dev(struct device *dev, const void *data) { return dev->bus == &iscsi_flashnode_bus; } static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) { device_unregister(&fnode_conn->dev); return 0; } static int flashnode_match_index(struct device *dev, const void *data) { struct iscsi_bus_flash_session *fnode_sess = NULL; int ret = 0; if (!iscsi_flashnode_bus_match(dev, NULL)) goto exit_match_index; fnode_sess = iscsi_dev_to_flash_session(dev); ret = (fnode_sess->target_id == *((const int *)data)) ? 
1 : 0; exit_match_index: return ret; } /** * iscsi_get_flashnode_by_index -finds flashnode session entry by index * @shost: pointer to host data * @idx: index to match * * Finds the flashnode session object for the passed index * * Returns: * pointer to found flashnode session object on success * %NULL on failure */ static struct iscsi_bus_flash_session * iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct device *dev; dev = device_find_child(&shost->shost_gendev, &idx, flashnode_match_index); if (dev) fnode_sess = iscsi_dev_to_flash_session(dev); return fnode_sess; } /** * iscsi_find_flashnode_sess - finds flashnode session entry * @shost: pointer to host data * @data: pointer to data containing value to use for comparison * @fn: function pointer that does actual comparison * * Finds the flashnode session object comparing the data passed using logic * defined in passed function pointer * * Returns: * pointer to found flashnode session device object on success * %NULL on failure */ struct device * iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data, device_match_t fn) { return device_find_child(&shost->shost_gendev, data, fn); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); /** * iscsi_find_flashnode_conn - finds flashnode connection entry * @fnode_sess: pointer to parent flashnode session entry * * Finds the flashnode connection object comparing the data passed using logic * defined in passed function pointer * * Returns: * pointer to found flashnode connection device object on success * %NULL on failure */ struct device * iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess) { return device_find_child(&fnode_sess->dev, NULL, iscsi_is_flashnode_conn_dev); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data) { if (!iscsi_is_flashnode_conn_dev(dev, NULL)) return 0; return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev)); } /** * iscsi_destroy_flashnode_sess - destroy flashnode session entry * @fnode_sess: pointer to flashnode session entry to be destroyed * * Deletes the flashnode session entry and all children flashnode connection * entries from sysfs */ void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess) { int err; err = device_for_each_child(&fnode_sess->dev, NULL, iscsi_iter_destroy_flashnode_conn_fn); if (err) pr_err("Could not delete all connections for %s. Error %d.\n", fnode_sess->dev.kobj.name, err); device_unregister(&fnode_sess->dev); } EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess); static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data) { if (!iscsi_flashnode_bus_match(dev, NULL)) return 0; iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev)); return 0; } /** * iscsi_destroy_all_flashnode - destroy all flashnode session entries * @shost: pointer to host data * * Destroys all the flashnode session entries and all corresponding children * flashnode connection entries from sysfs */ void iscsi_destroy_all_flashnode(struct Scsi_Host *shost) { device_for_each_child(&shost->shost_gendev, NULL, iscsi_iter_destroy_flashnode_fn); } EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode); /* * BSG support */ /** * iscsi_bsg_host_dispatch - Dispatch command to LLD. 
* @job: bsg job to be processed */ static int iscsi_bsg_host_dispatch(struct bsg_job *job) { struct Scsi_Host *shost = iscsi_job_to_shost(job); struct iscsi_bsg_request *req = job->request; struct iscsi_bsg_reply *reply = job->reply; struct iscsi_internal *i = to_iscsi_internal(shost->transportt); int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ int ret; /* check if we have the msgcode value at least */ if (job->request_len < sizeof(uint32_t)) { ret = -ENOMSG; goto fail_host_msg; } /* Validate the host command */ switch (req->msgcode) { case ISCSI_BSG_HST_VENDOR: cmdlen += sizeof(struct iscsi_bsg_host_vendor); if ((shost->hostt->vendor_id == 0L) || (req->rqst_data.h_vendor.vendor_id != shost->hostt->vendor_id)) { ret = -ESRCH; goto fail_host_msg; } break; default: ret = -EBADR; goto fail_host_msg; } /* check if we really have all the request data needed */ if (job->request_len < cmdlen) { ret = -ENOMSG; goto fail_host_msg; } ret = i->iscsi_transport->bsg_request(job); if (!ret) return 0; fail_host_msg: /* return the errno failure code as the only status */ BUG_ON(job->reply_len < sizeof(uint32_t)); reply->reply_payload_rcv_len = 0; reply->result = ret; job->reply_len = sizeof(uint32_t); bsg_job_done(job, ret, 0); return 0; } /** * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests * @shost: shost for iscsi_host * @ihost: iscsi_cls_host adding the structures to */ static int iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) { struct device *dev = &shost->shost_gendev; struct iscsi_internal *i = to_iscsi_internal(shost->transportt); struct queue_limits lim; struct request_queue *q; char bsg_name[20]; if (!i->iscsi_transport->bsg_request) return -ENOTSUPP; snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); scsi_init_limits(shost, &lim); q = bsg_setup_queue(dev, bsg_name, &lim, iscsi_bsg_host_dispatch, NULL, 0); if (IS_ERR(q)) { shost_printk(KERN_ERR, shost, "bsg interface failed to " "initialize - no request queue\n"); return PTR_ERR(q); } ihost->bsg_q = q; return 0; } static int iscsi_setup_host(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct iscsi_cls_host *ihost = shost->shost_data; memset(ihost, 0, sizeof(*ihost)); mutex_init(&ihost->mutex); iscsi_bsg_host_add(shost, ihost); /* ignore any bsg add error - we just can't do sgio */ return 0; } static int iscsi_remove_host(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct iscsi_cls_host *ihost = shost->shost_data; bsg_remove_queue(ihost->bsg_q); return 0; } static DECLARE_TRANSPORT_CLASS(iscsi_host_class, "iscsi_host", iscsi_setup_host, iscsi_remove_host, NULL); static DECLARE_TRANSPORT_CLASS(iscsi_session_class, "iscsi_session", NULL, NULL, NULL); static DECLARE_TRANSPORT_CLASS(iscsi_connection_class, "iscsi_connection", NULL, NULL, NULL); static struct sock *nls; static DEFINE_MUTEX(rx_queue_mutex); static LIST_HEAD(sesslist); static DEFINE_SPINLOCK(sesslock); static LIST_HEAD(connlist); static DEFINE_SPINLOCK(connlock); static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn) { struct iscsi_cls_session *sess = iscsi_dev_to_session(conn->dev.parent); return sess->sid; } /* * Returns the matching session to a given sid */ static struct iscsi_cls_session *iscsi_session_lookup(uint32_t sid) { unsigned long flags; struct iscsi_cls_session *sess; spin_lock_irqsave(&sesslock, flags); list_for_each_entry(sess, 
&sesslist, sess_list) { if (sess->sid == sid) { spin_unlock_irqrestore(&sesslock, flags); return sess; } } spin_unlock_irqrestore(&sesslock, flags); return NULL; } /* * Returns the matching connection to a given sid / cid tuple */ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid) { unsigned long flags; struct iscsi_cls_conn *conn; spin_lock_irqsave(&connlock, flags); list_for_each_entry(conn, &connlist, conn_list) { if ((conn->cid == cid) && (iscsi_conn_get_sid(conn) == sid)) { spin_unlock_irqrestore(&connlock, flags); return conn; } } spin_unlock_irqrestore(&connlock, flags); return NULL; } /* * The following functions can be used by LLDs that allocate * their own scsi_hosts or by software iscsi LLDs */ static struct { int value; char *name; } iscsi_session_state_names[] = { { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" }, { ISCSI_SESSION_FAILED, "FAILED" }, { ISCSI_SESSION_FREE, "FREE" }, }; static const char *iscsi_session_state_name(int state) { int i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) { if (iscsi_session_state_names[i].value == state) { name = iscsi_session_state_names[i].name; break; } } return name; } static char *iscsi_session_target_state_name[] = { [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND", [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED", [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED", [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING", }; int iscsi_session_chkready(struct iscsi_cls_session *session) { int err; switch (session->state) { case ISCSI_SESSION_LOGGED_IN: err = 0; break; case ISCSI_SESSION_FAILED: err = DID_IMM_RETRY << 16; break; case ISCSI_SESSION_FREE: err = DID_TRANSPORT_FAILFAST << 16; break; default: err = DID_NO_CONNECT << 16; break; } return err; } EXPORT_SYMBOL_GPL(iscsi_session_chkready); int iscsi_is_session_online(struct iscsi_cls_session *session) { unsigned long flags; int ret = 0; spin_lock_irqsave(&session->lock, flags); if (session->state == ISCSI_SESSION_LOGGED_IN) ret = 1; spin_unlock_irqrestore(&session->lock, flags); return ret; } EXPORT_SYMBOL_GPL(iscsi_is_session_online); static void iscsi_session_release(struct device *dev) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev); struct Scsi_Host *shost; shost = iscsi_session_to_shost(session); scsi_host_put(shost); ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n"); kfree(session); } int iscsi_is_session_dev(const struct device *dev) { return dev->release == iscsi_session_release; } EXPORT_SYMBOL_GPL(iscsi_is_session_dev); static int iscsi_iter_session_fn(struct device *dev, void *data) { void (* fn) (struct iscsi_cls_session *) = data; if (!iscsi_is_session_dev(dev)) return 0; fn(iscsi_dev_to_session(dev)); return 0; } void iscsi_host_for_each_session(struct Scsi_Host *shost, void (*fn)(struct iscsi_cls_session *)) { device_for_each_child(&shost->shost_gendev, fn, iscsi_iter_session_fn); } EXPORT_SYMBOL_GPL(iscsi_host_for_each_session); struct iscsi_scan_data { unsigned int channel; unsigned int id; u64 lun; enum scsi_scan_mode rescan; }; static int iscsi_user_scan_session(struct device *dev, void *data) { struct iscsi_scan_data *scan_data = data; struct iscsi_cls_session *session; struct Scsi_Host *shost; struct iscsi_cls_host *ihost; unsigned long flags; unsigned int id; if (!iscsi_is_session_dev(dev)) return 0; session = iscsi_dev_to_session(dev); ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n"); shost = iscsi_session_to_shost(session); ihost = shost->shost_data; mutex_lock(&ihost->mutex); 
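	/*
	 * ihost->mutex keeps this scan from racing with
	 * __iscsi_unbind_session(): unbind takes the same mutex and resets
	 * target_id to ISCSI_MAX_TARGET before the target is removed, so
	 * either the scan completes first or the check below sees the
	 * invalid target_id and skips. session->lock only guards the
	 * state/target_id/target_state fields themselves.
	 */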
	spin_lock_irqsave(&session->lock, flags);
	if (session->state != ISCSI_SESSION_LOGGED_IN) {
		spin_unlock_irqrestore(&session->lock, flags);
		goto user_scan_exit;
	}
	id = session->target_id;
	spin_unlock_irqrestore(&session->lock, flags);

	if (id != ISCSI_MAX_TARGET) {
		if ((scan_data->channel == SCAN_WILD_CARD ||
		     scan_data->channel == 0) &&
		    (scan_data->id == SCAN_WILD_CARD ||
		     scan_data->id == id)) {
			scsi_scan_target(&session->dev, 0, id,
					 scan_data->lun, scan_data->rescan);
			spin_lock_irqsave(&session->lock, flags);
			session->target_state = ISCSI_SESSION_TARGET_SCANNED;
			spin_unlock_irqrestore(&session->lock, flags);
		}
	}

user_scan_exit:
	mutex_unlock(&ihost->mutex);
	ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n");
	return 0;
}

static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
			   uint id, u64 lun)
{
	struct iscsi_scan_data scan_data;

	scan_data.channel = channel;
	scan_data.id = id;
	scan_data.lun = lun;
	scan_data.rescan = SCSI_SCAN_MANUAL;

	return device_for_each_child(&shost->shost_gendev, &scan_data,
				     iscsi_user_scan_session);
}

static void iscsi_scan_session(struct work_struct *work)
{
	struct iscsi_cls_session *session =
			container_of(work, struct iscsi_cls_session, scan_work);
	struct iscsi_scan_data scan_data;

	scan_data.channel = 0;
	scan_data.id = SCAN_WILD_CARD;
	scan_data.lun = SCAN_WILD_CARD;
	scan_data.rescan = SCSI_SCAN_RESCAN;

	iscsi_user_scan_session(&session->dev, &scan_data);
}

/**
 * iscsi_block_scsi_eh - block scsi eh until session state has transitioned
 * @cmd: scsi cmd passed to scsi eh handler
 *
 * If the session is down this function will wait for the recovery
 * timer to fire or for the session to be logged back in. If the
 * recovery timer fires then FAST_IO_FAIL is returned. The caller
 * should pass this error value to the scsi eh.
*/ int iscsi_block_scsi_eh(struct scsi_cmnd *cmd) { struct iscsi_cls_session *session = starget_to_session(scsi_target(cmd->device)); unsigned long flags; int ret = 0; spin_lock_irqsave(&session->lock, flags); while (session->state != ISCSI_SESSION_LOGGED_IN) { if (session->state == ISCSI_SESSION_FREE) { ret = FAST_IO_FAIL; break; } spin_unlock_irqrestore(&session->lock, flags); msleep(1000); spin_lock_irqsave(&session->lock, flags); } spin_unlock_irqrestore(&session->lock, flags); return ret; } EXPORT_SYMBOL_GPL(iscsi_block_scsi_eh); static void session_recovery_timedout(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, recovery_work.work); unsigned long flags; iscsi_cls_session_printk(KERN_INFO, session, "session recovery timed out after %d secs\n", session->recovery_tmo); spin_lock_irqsave(&session->lock, flags); switch (session->state) { case ISCSI_SESSION_FAILED: session->state = ISCSI_SESSION_FREE; break; case ISCSI_SESSION_LOGGED_IN: case ISCSI_SESSION_FREE: /* we raced with the unblock's flush */ spin_unlock_irqrestore(&session->lock, flags); return; } spin_unlock_irqrestore(&session->lock, flags); ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); if (session->transport->session_recovery_timedout) session->transport->session_recovery_timedout(session); } static void __iscsi_unblock_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, unblock_work); unsigned long flags; ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n"); cancel_delayed_work_sync(&session->recovery_work); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_LOGGED_IN; spin_unlock_irqrestore(&session->lock, flags); /* start IO */ scsi_target_unblock(&session->dev, SDEV_RUNNING); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n"); } /** * iscsi_unblock_session - set a session as logged in and start IO. * @session: iscsi session * * Mark a session as ready to accept IO. */ void iscsi_unblock_session(struct iscsi_cls_session *session) { if (!cancel_work_sync(&session->block_work)) cancel_delayed_work_sync(&session->recovery_work); queue_work(session->workq, &session->unblock_work); /* * Blocking the session can be done from any context so we only * queue the block work. Make sure the unblock work has completed * because it flushes/cancels the other works and updates the state. 
*/ flush_work(&session->unblock_work); } EXPORT_SYMBOL_GPL(iscsi_unblock_session); static void __iscsi_block_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, block_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); unsigned long flags; ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n"); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_FAILED; spin_unlock_irqrestore(&session->lock, flags); scsi_block_targets(shost, &session->dev); ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); if (session->recovery_tmo >= 0) queue_delayed_work(session->workq, &session->recovery_work, session->recovery_tmo * HZ); } void iscsi_block_session(struct iscsi_cls_session *session) { queue_work(session->workq, &session->block_work); } EXPORT_SYMBOL_GPL(iscsi_block_session); static void __iscsi_unbind_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, unbind_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; unsigned int target_id; bool remove_target = true; ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); /* Prevent new scans and make sure scanning is not in progress */ mutex_lock(&ihost->mutex); spin_lock_irqsave(&session->lock, flags); if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) { remove_target = false; } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); ISCSI_DBG_TRANS_SESSION(session, "Skipping target unbinding: Session is unbound/unbinding.\n"); return; } session->target_state = ISCSI_SESSION_TARGET_UNBINDING; target_id = session->target_id; session->target_id = ISCSI_MAX_TARGET; spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); if (remove_target) scsi_remove_target(&session->dev); if (session->ida_used) ida_free(&iscsi_sess_ida, target_id); iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); spin_lock_irqsave(&session->lock, flags); session->target_state = ISCSI_SESSION_TARGET_UNBOUND; spin_unlock_irqrestore(&session->lock, flags); } static void __iscsi_destroy_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, destroy_work); session->transport->destroy_session(session); } struct iscsi_cls_session * iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, int dd_size) { struct iscsi_cls_session *session; session = kzalloc(sizeof(*session) + dd_size, GFP_KERNEL); if (!session) return NULL; session->transport = transport; session->creator = -1; session->recovery_tmo = 120; session->recovery_tmo_sysfs_override = false; session->state = ISCSI_SESSION_FREE; INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); INIT_LIST_HEAD(&session->sess_list); INIT_WORK(&session->unblock_work, __iscsi_unblock_session); INIT_WORK(&session->block_work, __iscsi_block_session); INIT_WORK(&session->unbind_work, __iscsi_unbind_session); INIT_WORK(&session->scan_work, iscsi_scan_session); INIT_WORK(&session->destroy_work, __iscsi_destroy_session); spin_lock_init(&session->lock); /* this is released in the dev's release function */ scsi_host_get(shost); session->dev.parent = &shost->shost_gendev; session->dev.release = iscsi_session_release; 
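	/*
	 * The scsi_host_get() above is dropped by iscsi_session_release(),
	 * and iscsi_is_session_dev() keys off this ->release pointer to
	 * recognize session devices.
	 *
	 * Illustrative lifecycle sketch for an LLD (not lifted from any
	 * specific driver; "priv", "transport" and the error handling are
	 * placeholders). Passing ISCSI_MAX_TARGET asks iscsi_add_session()
	 * to allocate a target id from iscsi_sess_ida:
	 *
	 *	struct iscsi_cls_session *sess;
	 *	int err;
	 *
	 *	sess = iscsi_alloc_session(shost, transport, sizeof(*priv));
	 *	if (!sess)
	 *		return -ENOMEM;
	 *	err = iscsi_add_session(sess, ISCSI_MAX_TARGET);
	 *	if (err) {
	 *		iscsi_free_session(sess);	// drops the device ref
	 *		return err;
	 *	}
	 *	...
	 *	// later, on teardown:
	 *	iscsi_remove_session(sess);
	 *	iscsi_free_session(sess);
	 */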
device_initialize(&session->dev); if (dd_size) session->dd_data = &session[1]; ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n"); return session; } EXPORT_SYMBOL_GPL(iscsi_alloc_session); int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) { struct Scsi_Host *shost = iscsi_session_to_shost(session); unsigned long flags; int id = 0; int err; session->sid = atomic_add_return(1, &iscsi_session_nr); session->workq = alloc_workqueue("iscsi_ctrl_%d:%d", WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, shost->host_no, session->sid); if (!session->workq) return -ENOMEM; if (target_id == ISCSI_MAX_TARGET) { id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL); if (id < 0) { iscsi_cls_session_printk(KERN_ERR, session, "Failure in Target ID Allocation\n"); err = id; goto destroy_wq; } session->target_id = (unsigned int)id; session->ida_used = true; } else session->target_id = target_id; spin_lock_irqsave(&session->lock, flags); session->target_state = ISCSI_SESSION_TARGET_ALLOCATED; spin_unlock_irqrestore(&session->lock, flags); dev_set_name(&session->dev, "session%u", session->sid); err = device_add(&session->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register session's dev\n"); goto release_ida; } err = transport_register_device(&session->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register transport's dev\n"); goto release_dev; } spin_lock_irqsave(&sesslock, flags); list_add(&session->sess_list, &sesslist); spin_unlock_irqrestore(&sesslock, flags); iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); return 0; release_dev: device_del(&session->dev); release_ida: if (session->ida_used) ida_free(&iscsi_sess_ida, session->target_id); destroy_wq: destroy_workqueue(session->workq); return err; } EXPORT_SYMBOL_GPL(iscsi_add_session); static void iscsi_conn_release(struct device *dev) { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev); struct device *parent = conn->dev.parent; ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n"); kfree(conn); put_device(parent); } static int iscsi_is_conn_dev(const struct device *dev) { return dev->release == iscsi_conn_release; } static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data) { if (!iscsi_is_conn_dev(dev)) return 0; iscsi_remove_conn(iscsi_dev_to_conn(dev)); iscsi_put_conn(iscsi_dev_to_conn(dev)); return 0; } void iscsi_remove_session(struct iscsi_cls_session *session) { unsigned long flags; int err; ISCSI_DBG_TRANS_SESSION(session, "Removing session\n"); spin_lock_irqsave(&sesslock, flags); if (!list_empty(&session->sess_list)) list_del(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); if (!cancel_work_sync(&session->block_work)) cancel_delayed_work_sync(&session->recovery_work); cancel_work_sync(&session->unblock_work); /* * If we are blocked let commands flow again. The lld or iscsi * layer should set up the queuecommand to fail commands. * We assume that LLD will not be calling block/unblock while * removing the session. */ spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_FREE; spin_unlock_irqrestore(&session->lock, flags); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); /* * qla4xxx can perform it's own scans when it runs in kernel only * mode. Make sure to flush those scans. 
*/ flush_work(&session->scan_work); /* flush running unbind operations */ flush_work(&session->unbind_work); __iscsi_unbind_session(&session->unbind_work); /* hw iscsi may not have removed all connections from session */ err = device_for_each_child(&session->dev, NULL, iscsi_iter_destroy_conn_fn); if (err) iscsi_cls_session_printk(KERN_ERR, session, "Could not delete all connections " "for session. Error %d.\n", err); transport_unregister_device(&session->dev); destroy_workqueue(session->workq); ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n"); device_del(&session->dev); } EXPORT_SYMBOL_GPL(iscsi_remove_session); static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) { ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n"); switch (flag) { case STOP_CONN_RECOVER: WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); break; case STOP_CONN_TERM: WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); break; default: iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", flag); return; } conn->transport->stop_conn(conn, flag); ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n"); } static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) { struct iscsi_cls_session *session = iscsi_conn_to_session(conn); struct iscsi_endpoint *ep; ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); if (!conn->ep || !session->transport->ep_disconnect) return; ep = conn->ep; conn->ep = NULL; session->transport->unbind_conn(conn, is_active); session->transport->ep_disconnect(ep); ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); } static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn, struct iscsi_endpoint *ep, bool is_active) { /* Check if this was a conn error and the kernel took ownership */ spin_lock_irq(&conn->lock); if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { spin_unlock_irq(&conn->lock); iscsi_ep_disconnect(conn, is_active); } else { spin_unlock_irq(&conn->lock); ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); mutex_unlock(&conn->ep_mutex); flush_work(&conn->cleanup_work); /* * Userspace is now done with the EP so we can release the ref * iscsi_cleanup_conn_work_fn took. */ iscsi_put_endpoint(ep); mutex_lock(&conn->ep_mutex); } } static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag) { ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n"); /* * For offload, iscsid may not know about the ep like when iscsid is * restarted or for kernel based session shutdown iscsid is not even * up. For these cases, we do the disconnect now. */ mutex_lock(&conn->ep_mutex); if (conn->ep) iscsi_if_disconnect_bound_ep(conn, conn->ep, true); mutex_unlock(&conn->ep_mutex); /* * If this is a termination we have to call stop_conn with that flag * so the correct states get set. If we haven't run the work yet try to * avoid the extra run. */ if (flag == STOP_CONN_TERM) { cancel_work_sync(&conn->cleanup_work); iscsi_stop_conn(conn, flag); } else { /* * Figure out if it was the kernel or userspace initiating this. */ spin_lock_irq(&conn->lock); if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { spin_unlock_irq(&conn->lock); iscsi_stop_conn(conn, flag); } else { spin_unlock_irq(&conn->lock); ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); flush_work(&conn->cleanup_work); } /* * Only clear for recovery to avoid extra cleanup runs during * termination. 
*/ spin_lock_irq(&conn->lock); clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); spin_unlock_irq(&conn->lock); } ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); return 0; } static void iscsi_cleanup_conn_work_fn(struct work_struct *work) { struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn, cleanup_work); struct iscsi_cls_session *session = iscsi_conn_to_session(conn); mutex_lock(&conn->ep_mutex); /* * Get a ref to the ep, so we don't release its ID until after * userspace is done referencing it in iscsi_if_disconnect_bound_ep. */ if (conn->ep) get_device(&conn->ep->dev); iscsi_ep_disconnect(conn, false); if (system_state != SYSTEM_RUNNING) { /* * If the user has set up for the session to never timeout * then hang like they wanted. For all other cases fail right * away since userspace is not going to relogin. */ if (session->recovery_tmo > 0) session->recovery_tmo = 0; } iscsi_stop_conn(conn, STOP_CONN_RECOVER); mutex_unlock(&conn->ep_mutex); ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n"); } static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data) { struct iscsi_transport *transport; struct iscsi_cls_conn *conn; if (!iscsi_is_conn_dev(dev)) return 0; conn = iscsi_dev_to_conn(dev); transport = conn->transport; if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN) iscsi_if_stop_conn(conn, STOP_CONN_TERM); transport->destroy_conn(conn); return 0; } /** * iscsi_force_destroy_session - destroy a session from the kernel * @session: session to destroy * * Force the destruction of a session from the kernel. This should only be * used when userspace is no longer running during system shutdown. */ void iscsi_force_destroy_session(struct iscsi_cls_session *session) { struct iscsi_transport *transport = session->transport; unsigned long flags; WARN_ON_ONCE(system_state == SYSTEM_RUNNING); spin_lock_irqsave(&sesslock, flags); if (list_empty(&session->sess_list)) { spin_unlock_irqrestore(&sesslock, flags); /* * Conn/ep is already freed. Session is being torn down via * async path. For shutdown we don't care about it so return. 
*/ return; } spin_unlock_irqrestore(&sesslock, flags); device_for_each_child(&session->dev, NULL, iscsi_iter_force_destroy_conn_fn); transport->destroy_session(session); } EXPORT_SYMBOL_GPL(iscsi_force_destroy_session); void iscsi_free_session(struct iscsi_cls_session *session) { ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n"); iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION); put_device(&session->dev); } EXPORT_SYMBOL_GPL(iscsi_free_session); /** * iscsi_alloc_conn - alloc iscsi class connection * @session: iscsi cls session * @dd_size: private driver data size * @cid: connection id */ struct iscsi_cls_conn * iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) { struct iscsi_transport *transport = session->transport; struct iscsi_cls_conn *conn; conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL); if (!conn) return NULL; if (dd_size) conn->dd_data = &conn[1]; mutex_init(&conn->ep_mutex); spin_lock_init(&conn->lock); INIT_LIST_HEAD(&conn->conn_list); INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn); conn->transport = transport; conn->cid = cid; WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); /* this is released in the dev's release function */ if (!get_device(&session->dev)) goto free_conn; dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid); device_initialize(&conn->dev); conn->dev.parent = &session->dev; conn->dev.release = iscsi_conn_release; return conn; free_conn: kfree(conn); return NULL; } EXPORT_SYMBOL_GPL(iscsi_alloc_conn); /** * iscsi_add_conn - add iscsi class connection * @conn: iscsi cls connection * * This will expose iscsi_cls_conn to sysfs so make sure the related * resources for sysfs attributes are initialized before calling this. */ int iscsi_add_conn(struct iscsi_cls_conn *conn) { int err; unsigned long flags; struct iscsi_cls_session *session = iscsi_dev_to_session(conn->dev.parent); err = device_add(&conn->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register connection's dev\n"); return err; } err = transport_register_device(&conn->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register transport's dev\n"); device_del(&conn->dev); return err; } spin_lock_irqsave(&connlock, flags); list_add(&conn->conn_list, &connlist); spin_unlock_irqrestore(&connlock, flags); return 0; } EXPORT_SYMBOL_GPL(iscsi_add_conn); /** * iscsi_remove_conn - remove iscsi class connection from sysfs * @conn: iscsi cls connection * * Remove iscsi_cls_conn from sysfs, and wait for previous * read/write of iscsi_cls_conn's attributes in sysfs to finish. 
*/ void iscsi_remove_conn(struct iscsi_cls_conn *conn) { unsigned long flags; spin_lock_irqsave(&connlock, flags); list_del(&conn->conn_list); spin_unlock_irqrestore(&connlock, flags); transport_unregister_device(&conn->dev); device_del(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_remove_conn); void iscsi_put_conn(struct iscsi_cls_conn *conn) { put_device(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_put_conn); void iscsi_get_conn(struct iscsi_cls_conn *conn) { get_device(&conn->dev); } EXPORT_SYMBOL_GPL(iscsi_get_conn); /* * iscsi interface functions */ static struct iscsi_internal * iscsi_if_transport_lookup(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; spin_lock_irqsave(&iscsi_transport_lock, flags); list_for_each_entry(priv, &iscsi_transports, list) { if (tt == priv->iscsi_transport) { spin_unlock_irqrestore(&iscsi_transport_lock, flags); return priv; } } spin_unlock_irqrestore(&iscsi_transport_lock, flags); return NULL; } static int iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) { return nlmsg_multicast(nls, skb, 0, group, gfp); } static int iscsi_unicast_skb(struct sk_buff *skb, u32 portid) { return nlmsg_unicast(nls, skb, portid); } int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; char *pdu; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) + data_size); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return -EINVAL; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED); iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver " "control PDU: OOM\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_RECV_PDU; ev->r.recv_req.cid = conn->cid; ev->r.recv_req.sid = iscsi_conn_get_sid(conn); pdu = (char*)ev + sizeof(*ev); memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_recv_pdu); int iscsi_offload_mesg(struct Scsi_Host *shost, struct iscsi_transport *transport, uint32_t type, char *data, uint16_t data_size) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->type = type; ev->transport_handle = iscsi_handle(transport); switch (type) { case ISCSI_KEVENT_PATH_REQ: ev->r.req_path.host_no = shost->host_no; break; case ISCSI_KEVENT_IF_DOWN: ev->r.notify_if_down.host_no = shost->host_no; break; } memcpy((char *)ev + sizeof(*ev), data, data_size); return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_offload_mesg); void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); unsigned long flags; int state; spin_lock_irqsave(&conn->lock, flags); /* * Userspace will only do a stop call if we are at least bound. 
And, we * only need to do the in kernel cleanup if in the UP state so cmds can * be released to upper layers. If in other states just wait for * userspace to avoid races that can leave the cleanup_work queued. */ state = READ_ONCE(conn->state); switch (state) { case ISCSI_CONN_BOUND: case ISCSI_CONN_UP: if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work); } break; default: ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", state); break; } spin_unlock_irqrestore(&conn->lock, flags); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " "conn error (%d)\n", error); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_ERROR; ev->r.connerror.error = error; ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", error); } EXPORT_SYMBOL_GPL(iscsi_conn_error_event); void iscsi_conn_login_event(struct iscsi_cls_conn *conn, enum iscsi_conn_state state) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) return; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " "conn login (%d)\n", state); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; ev->r.conn_login.state = state; ev->r.conn_login.cid = conn->cid; ev->r.conn_login.sid = iscsi_conn_get_sid(conn); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n", state); } EXPORT_SYMBOL_GPL(iscsi_conn_login_event); void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, enum iscsi_host_event_code code, uint32_t data_size, uint8_t *data) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_NOIO); if (!skb) { printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n", host_no, code); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_HOST_EVENT; ev->r.host_event.host_no = host_no; ev->r.host_event.code = code; ev->r.host_event.data_size = data_size; if (data_size) memcpy((char *)ev + sizeof(*ev), data, data_size); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); } EXPORT_SYMBOL_GPL(iscsi_post_host_event); void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, uint32_t status, uint32_t pid, uint32_t data_size, uint8_t *data) { struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_NOIO); if (!skb) { printk(KERN_ERR "gracefully ignored ping comp: OOM\n"); return; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_PING_COMP; 
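	/*
	 * Wire format note: the skb carries a netlink header, then the
	 * struct iscsi_uevent filled in here, then data_size bytes of ping
	 * payload copied directly behind it. The message is multicast to
	 * the ISCSI_NL_GRP_ISCSID group, the same group used by
	 * iscsi_post_host_event() and iscsi_recv_pdu(), so the iSCSI daemon
	 * receives it on its existing netlink socket.
	 */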
ev->r.ping_comp.host_no = host_no; ev->r.ping_comp.status = status; ev->r.ping_comp.pid = pid; ev->r.ping_comp.data_size = data_size; memcpy((char *)ev + sizeof(*ev), data, data_size); iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO); } EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); static int iscsi_if_send_reply(u32 portid, int type, void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; int len = nlmsg_total_size(size); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { printk(KERN_ERR "Could not allocate skb to send reply.\n"); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0); memcpy(nlmsg_data(nlh), payload, size); return iscsi_unicast_skb(skb, portid); } static int iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_stats *stats; struct sk_buff *skbstat; struct iscsi_cls_conn *conn; struct nlmsghdr *nlhstat; struct iscsi_uevent *evstat; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_stats) + sizeof(struct iscsi_stats_custom) * ISCSI_STATS_CUSTOM_MAX); int err = 0; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; conn = iscsi_conn_lookup(ev->u.get_stats.sid, ev->u.get_stats.cid); if (!conn) return -EEXIST; do { int actual_size; skbstat = alloc_skb(len, GFP_ATOMIC); if (!skbstat) { iscsi_cls_conn_printk(KERN_ERR, conn, "can not " "deliver stats: OOM\n"); return -ENOMEM; } nlhstat = __nlmsg_put(skbstat, 0, 0, 0, (len - sizeof(*nlhstat)), 0); evstat = nlmsg_data(nlhstat); memset(evstat, 0, sizeof(*evstat)); evstat->transport_handle = iscsi_handle(conn->transport); evstat->type = nlh->nlmsg_type; evstat->u.get_stats.cid = ev->u.get_stats.cid; evstat->u.get_stats.sid = ev->u.get_stats.sid; stats = (struct iscsi_stats *) ((char*)evstat + sizeof(*evstat)); memset(stats, 0, sizeof(*stats)); transport->get_stats(conn, stats); actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) + sizeof(struct iscsi_stats) + sizeof(struct iscsi_stats_custom) * stats->custom_length); actual_size -= sizeof(*nlhstat); actual_size = nlmsg_msg_size(actual_size); skb_trim(skbstat, NLMSG_ALIGN(actual_size)); nlhstat->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); } while (err < 0 && err != -ECONNREFUSED); return err; } /** * iscsi_session_event - send session destr. 
completion event * @session: iscsi class session * @event: type of event */ int iscsi_session_event(struct iscsi_cls_session *session, enum iscsi_uevent_e event) { struct iscsi_internal *priv; struct Scsi_Host *shost; struct iscsi_uevent *ev; struct sk_buff *skb; struct nlmsghdr *nlh; int rc, len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(session->transport); if (!priv) return -EINVAL; shost = iscsi_session_to_shost(session); skb = alloc_skb(len, GFP_KERNEL); if (!skb) { iscsi_cls_session_printk(KERN_ERR, session, "Cannot notify userspace of session " "event %u\n", event); return -ENOMEM; } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(session->transport); ev->type = event; switch (event) { case ISCSI_KEVENT_DESTROY_SESSION: ev->r.d_session.host_no = shost->host_no; ev->r.d_session.sid = session->sid; break; case ISCSI_KEVENT_CREATE_SESSION: ev->r.c_session_ret.host_no = shost->host_no; ev->r.c_session_ret.sid = session->sid; break; case ISCSI_KEVENT_UNBIND_SESSION: ev->r.unbind_session.host_no = shost->host_no; ev->r.unbind_session.sid = session->sid; break; default: iscsi_cls_session_printk(KERN_ERR, session, "Invalid event " "%u.\n", event); kfree_skb(skb); return -EINVAL; } /* * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); if (rc == -ESRCH) iscsi_cls_session_printk(KERN_ERR, session, "Cannot notify userspace of session " "event %u. Check iscsi daemon\n", event); ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n", event, rc); return rc; } EXPORT_SYMBOL_GPL(iscsi_session_event); static int iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep, struct iscsi_uevent *ev, pid_t pid, uint32_t initial_cmdsn, uint16_t cmds_max, uint16_t queue_depth) { struct iscsi_transport *transport = priv->iscsi_transport; struct iscsi_cls_session *session; struct Scsi_Host *shost; session = transport->create_session(ep, cmds_max, queue_depth, initial_cmdsn); if (!session) return -ENOMEM; session->creator = pid; shost = iscsi_session_to_shost(session); ev->r.c_session_ret.host_no = shost->host_no; ev->r.c_session_ret.sid = session->sid; ISCSI_DBG_TRANS_SESSION(session, "Completed creating transport session\n"); return 0; } static int iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; session = iscsi_session_lookup(ev->u.c_conn.sid); if (!session) { printk(KERN_ERR "iscsi: invalid session %d.\n", ev->u.c_conn.sid); return -EINVAL; } conn = transport->create_conn(session, ev->u.c_conn.cid); if (!conn) { iscsi_cls_session_printk(KERN_ERR, session, "couldn't create a new connection."); return -ENOMEM; } ev->r.c_conn_ret.sid = session->sid; ev->r.c_conn_ret.cid = conn->cid; ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n"); return 0; } static int iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct iscsi_cls_conn *conn; conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); if (!conn) return -EINVAL; ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n"); flush_work(&conn->cleanup_work); ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n"); if (transport->destroy_conn) transport->destroy_conn(conn); return 0; } static int iscsi_if_set_param(struct iscsi_transport 
*transport, struct iscsi_uevent *ev, u32 rlen) { char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; int err = 0, value = 0, state; if (ev->u.set_param.len > rlen || ev->u.set_param.len > PAGE_SIZE) return -EINVAL; session = iscsi_session_lookup(ev->u.set_param.sid); conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); if (!conn || !session) return -EINVAL; /* data will be regarded as NULL-ended string, do length check */ if (strlen(data) > ev->u.set_param.len) return -EINVAL; switch (ev->u.set_param.param) { case ISCSI_PARAM_SESS_RECOVERY_TMO: sscanf(data, "%d", &value); if (!session->recovery_tmo_sysfs_override) session->recovery_tmo = value; break; default: state = READ_ONCE(conn->state); if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); } else { return -ENOTCONN; } } return err; } static int iscsi_if_ep_connect(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type) { struct iscsi_endpoint *ep; struct sockaddr *dst_addr; struct Scsi_Host *shost = NULL; int non_blocking, err = 0; if (!transport->ep_connect) return -EINVAL; if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) { shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no); if (!shost) { printk(KERN_ERR "ep connect failed. Could not find " "host no %u\n", ev->u.ep_connect_through_host.host_no); return -ENODEV; } non_blocking = ev->u.ep_connect_through_host.non_blocking; } else non_blocking = ev->u.ep_connect.non_blocking; dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); ep = transport->ep_connect(shost, dst_addr, non_blocking); if (IS_ERR(ep)) { err = PTR_ERR(ep); goto release_host; } ev->r.ep_connect_ret.handle = ep->id; release_host: if (shost) scsi_host_put(shost); return err; } static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, u64 ep_handle) { struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep; if (!transport->ep_disconnect) return -EINVAL; ep = iscsi_lookup_endpoint(ep_handle); if (!ep) return -EINVAL; conn = ep->conn; if (!conn) { /* * conn was not even bound yet, so we can't get iscsi conn * failures yet. 
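		 * Just have the LLD tear down the endpoint and drop our
		 * reference below.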
*/ transport->ep_disconnect(ep); goto put_ep; } mutex_lock(&conn->ep_mutex); iscsi_if_disconnect_bound_ep(conn, ep, false); mutex_unlock(&conn->ep_mutex); put_ep: iscsi_put_endpoint(ep); return 0; } static int iscsi_if_transport_ep(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type, u32 rlen) { struct iscsi_endpoint *ep; int rc = 0; switch (msg_type) { case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: if (rlen < sizeof(struct sockaddr)) rc = -EINVAL; else rc = iscsi_if_ep_connect(transport, ev, msg_type); break; case ISCSI_UEVENT_TRANSPORT_EP_POLL: if (!transport->ep_poll) return -EINVAL; ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle); if (!ep) return -EINVAL; ev->r.retcode = transport->ep_poll(ep, ev->u.ep_poll.timeout_ms); iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: rc = iscsi_if_ep_disconnect(transport, ev->u.ep_disconnect.ep_handle); break; } return rc; } static int iscsi_tgt_dscvr(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct sockaddr *dst_addr; int err; if (rlen < sizeof(*dst_addr)) return -EINVAL; if (!transport->tgt_dscvr) return -EINVAL; shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); if (!shost) { printk(KERN_ERR "target discovery could not find host no %u\n", ev->u.tgt_dscvr.host_no); return -ENODEV; } dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type, ev->u.tgt_dscvr.enable, dst_addr); scsi_host_put(shost); return err; } static int iscsi_set_host_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { char *data = (char*)ev + sizeof(*ev); struct Scsi_Host *shost; int err; if (!transport->set_host_param) return -ENOSYS; if (ev->u.set_host_param.len > rlen || ev->u.set_host_param.len > PAGE_SIZE) return -EINVAL; shost = scsi_host_lookup(ev->u.set_host_param.host_no); if (!shost) { printk(KERN_ERR "set_host_param could not find host no %u\n", ev->u.set_host_param.host_no); return -ENODEV; } /* see similar check in iscsi_if_set_param() */ if (strlen(data) > ev->u.set_host_param.len) { err = -EINVAL; goto out; } err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); out: scsi_host_put(shost); return err; } static int iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct iscsi_path *params; int err; if (rlen < sizeof(*params)) return -EINVAL; if (!transport->set_path) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_path.host_no); if (!shost) { printk(KERN_ERR "set path could not find host no %u\n", ev->u.set_path.host_no); return -ENODEV; } params = (struct iscsi_path *)((char *)ev + sizeof(*ev)); err = transport->set_path(shost, params); scsi_host_put(shost); return err; } static int iscsi_session_has_conns(int sid) { struct iscsi_cls_conn *conn; unsigned long flags; int found = 0; spin_lock_irqsave(&connlock, flags); list_for_each_entry(conn, &connlist, conn_list) { if (iscsi_conn_get_sid(conn) == sid) { found = 1; break; } } spin_unlock_irqrestore(&connlock, flags); return found; } static int iscsi_set_iface_params(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int err; if (!transport->set_iface_param) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_iface_params.host_no); if (!shost) { printk(KERN_ERR "set_iface_params could not find 
host no %u\n", ev->u.set_iface_params.host_no); return -ENODEV; } err = transport->set_iface_param(shost, data, len); scsi_host_put(shost); return err; } static int iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen) { struct Scsi_Host *shost; struct sockaddr *dst_addr; int err; if (rlen < sizeof(*dst_addr)) return -EINVAL; if (!transport->send_ping) return -ENOSYS; shost = scsi_host_lookup(ev->u.iscsi_ping.host_no); if (!shost) { printk(KERN_ERR "iscsi_ping could not find host no %u\n", ev->u.iscsi_ping.host_no); return -ENODEV; } dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev)); err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num, ev->u.iscsi_ping.iface_type, ev->u.iscsi_ping.payload_size, ev->u.iscsi_ping.pid, dst_addr); scsi_host_put(shost); return err; } static int iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct Scsi_Host *shost = NULL; struct iscsi_chap_rec *chap_rec; struct iscsi_internal *priv; struct sk_buff *skbchap; struct nlmsghdr *nlhchap; struct iscsi_uevent *evchap; uint32_t chap_buf_size; int len, err = 0; char *buf; if (!transport->get_chap) return -EINVAL; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec)); len = nlmsg_total_size(sizeof(*ev) + chap_buf_size); shost = scsi_host_lookup(ev->u.get_chap.host_no); if (!shost) { printk(KERN_ERR "%s: failed. Could not find host no %u\n", __func__, ev->u.get_chap.host_no); return -ENODEV; } do { int actual_size; skbchap = alloc_skb(len, GFP_KERNEL); if (!skbchap) { printk(KERN_ERR "can not deliver chap: OOM\n"); err = -ENOMEM; goto exit_get_chap; } nlhchap = __nlmsg_put(skbchap, 0, 0, 0, (len - sizeof(*nlhchap)), 0); evchap = nlmsg_data(nlhchap); memset(evchap, 0, sizeof(*evchap)); evchap->transport_handle = iscsi_handle(transport); evchap->type = nlh->nlmsg_type; evchap->u.get_chap.host_no = ev->u.get_chap.host_no; evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx; evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries; buf = (char *)evchap + sizeof(*evchap); memset(buf, 0, chap_buf_size); err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, &evchap->u.get_chap.num_entries, buf); actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size); skb_trim(skbchap, NLMSG_ALIGN(actual_size)); nlhchap->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); } while (err < 0 && err != -ECONNREFUSED); exit_get_chap: scsi_host_put(shost); return err; } static int iscsi_set_chap(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int err = 0; if (!transport->set_chap) return -ENOSYS; shost = scsi_host_lookup(ev->u.set_path.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.set_path.host_no); return -ENODEV; } err = transport->set_chap(shost, data, len); scsi_host_put(shost); return err; } static int iscsi_delete_chap(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; int err = 0; if (!transport->delete_chap) return -ENOSYS; shost = scsi_host_lookup(ev->u.delete_chap.host_no); if (!shost) { printk(KERN_ERR "%s could not find host no %u\n", __func__, ev->u.delete_chap.host_no); return -ENODEV; } err = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx); scsi_host_put(shost); return err; } static const struct { 
enum iscsi_discovery_parent_type value; char *name; } iscsi_discovery_parent_names[] = { {ISCSI_DISC_PARENT_UNKNOWN, "Unknown" }, {ISCSI_DISC_PARENT_SENDTGT, "Sendtarget" }, {ISCSI_DISC_PARENT_ISNS, "isns" }, }; char *iscsi_get_discovery_parent_name(int parent_type) { int i; char *state = "Unknown!"; for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) { if (iscsi_discovery_parent_names[i].value & parent_type) { state = iscsi_discovery_parent_names[i].name; break; } } return state; } EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name); static int iscsi_set_flashnode_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->set_flashnode_param) { err = -ENOSYS; goto exit_set_fnode; } shost = scsi_host_lookup(ev->u.set_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.set_flashnode.host_no); err = -ENODEV; goto exit_set_fnode; } idx = ev->u.set_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.set_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_set_fnode: return err; } static int iscsi_new_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) { char *data = (char *)ev + sizeof(*ev); struct Scsi_Host *shost; int index; int err = 0; if (!transport->new_flashnode) { err = -ENOSYS; goto exit_new_fnode; } shost = scsi_host_lookup(ev->u.new_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.new_flashnode.host_no); err = -ENODEV; goto exit_new_fnode; } index = transport->new_flashnode(shost, data, len); if (index >= 0) ev->r.new_flashnode_ret.flashnode_idx = index; else err = -EIO; scsi_host_put(shost); exit_new_fnode: return err; } static int iscsi_del_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; uint32_t idx; int err = 0; if (!transport->del_flashnode) { err = -ENOSYS; goto exit_del_fnode; } shost = scsi_host_lookup(ev->u.del_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.del_flashnode.host_no); err = -ENODEV; goto exit_del_fnode; } idx = ev->u.del_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.del_flashnode.host_no); err = -ENODEV; goto put_host; } err = transport->del_flashnode(fnode_sess); put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_del_fnode: return err; } static int iscsi_login_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->login_flashnode) { err = -ENOSYS; goto exit_login_fnode; } shost = scsi_host_lookup(ev->u.login_flashnode.host_no); 
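	/* scsi_host_lookup() takes a reference; it is dropped at put_host. */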
if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.login_flashnode.host_no); err = -ENODEV; goto exit_login_fnode; } idx = ev->u.login_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.login_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->login_flashnode(fnode_sess, fnode_conn); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_login_fnode: return err; } static int iscsi_logout_flashnode(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; uint32_t idx; int err = 0; if (!transport->logout_flashnode) { err = -ENOSYS; goto exit_logout_fnode; } shost = scsi_host_lookup(ev->u.logout_flashnode.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; goto exit_logout_fnode; } idx = ev->u.logout_flashnode.flashnode_idx; fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", __func__, idx, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->logout_flashnode(fnode_sess, fnode_conn); put_device(dev); put_sess: put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); exit_logout_fnode: return err; } static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct Scsi_Host *shost; struct iscsi_cls_session *session; int err = 0; if (!transport->logout_flashnode_sid) { err = -ENOSYS; goto exit_logout_sid; } shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no); if (!shost) { pr_err("%s could not find host no %u\n", __func__, ev->u.logout_flashnode.host_no); err = -ENODEV; goto exit_logout_sid; } session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid); if (!session) { pr_err("%s could not find session id %u\n", __func__, ev->u.logout_flashnode_sid.sid); err = -EINVAL; goto put_host; } err = transport->logout_flashnode_sid(session); put_host: scsi_host_put(shost); exit_logout_sid: return err; } static int iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct Scsi_Host *shost = NULL; struct iscsi_internal *priv; struct sk_buff *skbhost_stats; struct nlmsghdr *nlhhost_stats; struct iscsi_uevent *evhost_stats; int host_stats_size = 0; int len, err = 0; char *buf; if (!transport->get_host_stats) return -ENOSYS; priv = iscsi_if_transport_lookup(transport); if (!priv) return -EINVAL; host_stats_size = sizeof(struct iscsi_offload_host_stats); len = nlmsg_total_size(sizeof(*ev) + host_stats_size); shost = scsi_host_lookup(ev->u.get_host_stats.host_no); if (!shost) { pr_err("%s: failed. 
Could not find host no %u\n", __func__, ev->u.get_host_stats.host_no); return -ENODEV; } do { int actual_size; skbhost_stats = alloc_skb(len, GFP_KERNEL); if (!skbhost_stats) { pr_err("cannot deliver host stats: OOM\n"); err = -ENOMEM; goto exit_host_stats; } nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0, (len - sizeof(*nlhhost_stats)), 0); evhost_stats = nlmsg_data(nlhhost_stats); memset(evhost_stats, 0, sizeof(*evhost_stats)); evhost_stats->transport_handle = iscsi_handle(transport); evhost_stats->type = nlh->nlmsg_type; evhost_stats->u.get_host_stats.host_no = ev->u.get_host_stats.host_no; buf = (char *)evhost_stats + sizeof(*evhost_stats); memset(buf, 0, host_stats_size); err = transport->get_host_stats(shost, buf, host_stats_size); if (err) { kfree_skb(skbhost_stats); goto exit_host_stats; } actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); nlhhost_stats->nlmsg_len = actual_size; err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); } while (err < 0 && err != -ECONNREFUSED); exit_host_stats: scsi_host_put(shost); return err; } static int iscsi_if_transport_conn(struct iscsi_transport *transport, struct nlmsghdr *nlh, u32 pdu_len) { struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_cls_session *session; struct iscsi_cls_conn *conn = NULL; struct iscsi_endpoint *ep; int err = 0; switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_CONN: return iscsi_if_create_conn(transport, ev); case ISCSI_UEVENT_DESTROY_CONN: return iscsi_if_destroy_conn(transport, ev); case ISCSI_UEVENT_STOP_CONN: conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); if (!conn) return -EINVAL; return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag); } /* * The following cmds need to be run under the ep_mutex so in kernel * conn cleanup (ep_disconnect + unbind and conn) is not done while * these are running. They also must not run if we have just run a conn * cleanup because they would set the state in a way that might allow * IO or send IO themselves. 
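	 * The ISCSI_CLS_CONN_BIT_CLEANUP check under conn->lock below closes
	 * that window.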
*/ switch (nlh->nlmsg_type) { case ISCSI_UEVENT_START_CONN: conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid); break; case ISCSI_UEVENT_BIND_CONN: conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); break; case ISCSI_UEVENT_SEND_PDU: conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); break; } if (!conn) return -EINVAL; mutex_lock(&conn->ep_mutex); spin_lock_irq(&conn->lock); if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { spin_unlock_irq(&conn->lock); mutex_unlock(&conn->ep_mutex); ev->r.retcode = -ENOTCONN; return 0; } spin_unlock_irq(&conn->lock); switch (nlh->nlmsg_type) { case ISCSI_UEVENT_BIND_CONN: session = iscsi_session_lookup(ev->u.b_conn.sid); if (!session) { err = -EINVAL; break; } ev->r.retcode = transport->bind_conn(session, conn, ev->u.b_conn.transport_eph, ev->u.b_conn.is_leading); if (!ev->r.retcode) WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); if (ev->r.retcode || !transport->ep_connect) break; ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); if (ep) { ep->conn = conn; conn->ep = ep; iscsi_put_endpoint(ep); } else { err = -ENOTCONN; iscsi_cls_conn_printk(KERN_ERR, conn, "Could not set ep conn binding\n"); } break; case ISCSI_UEVENT_START_CONN: ev->r.retcode = transport->start_conn(conn); if (!ev->r.retcode) WRITE_ONCE(conn->state, ISCSI_CONN_UP); break; case ISCSI_UEVENT_SEND_PDU: if ((ev->u.send_pdu.hdr_size > pdu_len) || (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { err = -EINVAL; break; } ev->r.retcode = transport->send_pdu(conn, (struct iscsi_hdr *)((char *)ev + sizeof(*ev)), (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, ev->u.send_pdu.data_size); break; default: err = -ENOSYS; } mutex_unlock(&conn->ep_mutex); return err; } static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; u32 portid; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; struct iscsi_cls_session *session; struct iscsi_endpoint *ep = NULL; u32 rlen; if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) *group = ISCSI_NL_GRP_UIP; else *group = ISCSI_NL_GRP_ISCSID; priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); if (!priv) return -EINVAL; transport = priv->iscsi_transport; if (!try_module_get(transport->owner)) return -EINVAL; portid = NETLINK_CB(skb).portid; /* * Even though the remaining payload may not be regarded as nlattr, * (like address or something else), calculate the remaining length * here to ease following length checks. 
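	 * rlen is therefore the number of bytes that follow the iscsi_uevent
	 * header in this message.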
*/ rlen = nlmsg_attrlen(nlh, sizeof(*ev)); switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ep, ev, portid, ev->u.c_session.initial_cmdsn, ev->u.c_session.cmds_max, ev->u.c_session.queue_depth); break; case ISCSI_UEVENT_CREATE_BOUND_SESSION: ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle); if (!ep) { err = -EINVAL; break; } err = iscsi_if_create_session(priv, ep, ev, portid, ev->u.c_bound_session.initial_cmdsn, ev->u.c_bound_session.cmds_max, ev->u.c_bound_session.queue_depth); iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); if (!session) err = -EINVAL; else if (iscsi_session_has_conns(ev->u.d_session.sid)) err = -EBUSY; else transport->destroy_session(session); break; case ISCSI_UEVENT_DESTROY_SESSION_ASYNC: session = iscsi_session_lookup(ev->u.d_session.sid); if (!session) err = -EINVAL; else if (iscsi_session_has_conns(ev->u.d_session.sid)) err = -EBUSY; else { unsigned long flags; /* Prevent this session from being found again */ spin_lock_irqsave(&sesslock, flags); list_del_init(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); queue_work(system_unbound_wq, &session->destroy_work); } break; case ISCSI_UEVENT_UNBIND_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); if (session) queue_work(session->workq, &session->unbind_work); else err = -EINVAL; break; case ISCSI_UEVENT_SET_PARAM: err = iscsi_if_set_param(transport, ev, rlen); break; case ISCSI_UEVENT_CREATE_CONN: case ISCSI_UEVENT_DESTROY_CONN: case ISCSI_UEVENT_STOP_CONN: case ISCSI_UEVENT_START_CONN: case ISCSI_UEVENT_BIND_CONN: case ISCSI_UEVENT_SEND_PDU: err = iscsi_if_transport_conn(transport, nlh, rlen); break; case ISCSI_UEVENT_GET_STATS: err = iscsi_if_get_stats(transport, nlh); break; case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: case ISCSI_UEVENT_TRANSPORT_EP_POLL: case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen); break; case ISCSI_UEVENT_TGT_DSCVR: err = iscsi_tgt_dscvr(transport, ev, rlen); break; case ISCSI_UEVENT_SET_HOST_PARAM: err = iscsi_set_host_param(transport, ev, rlen); break; case ISCSI_UEVENT_PATH_UPDATE: err = iscsi_set_path(transport, ev, rlen); break; case ISCSI_UEVENT_SET_IFACE_PARAMS: err = iscsi_set_iface_params(transport, ev, rlen); break; case ISCSI_UEVENT_PING: err = iscsi_send_ping(transport, ev, rlen); break; case ISCSI_UEVENT_GET_CHAP: err = iscsi_get_chap(transport, nlh); break; case ISCSI_UEVENT_DELETE_CHAP: err = iscsi_delete_chap(transport, ev); break; case ISCSI_UEVENT_SET_FLASHNODE_PARAMS: err = iscsi_set_flashnode_param(transport, ev, rlen); break; case ISCSI_UEVENT_NEW_FLASHNODE: err = iscsi_new_flashnode(transport, ev, rlen); break; case ISCSI_UEVENT_DEL_FLASHNODE: err = iscsi_del_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGIN_FLASHNODE: err = iscsi_login_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGOUT_FLASHNODE: err = iscsi_logout_flashnode(transport, ev); break; case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: err = iscsi_logout_flashnode_sid(transport, ev); break; case ISCSI_UEVENT_SET_CHAP: err = iscsi_set_chap(transport, ev, rlen); break; case ISCSI_UEVENT_GET_HOST_STATS: err = iscsi_get_host_stats(transport, nlh); break; default: err = -ENOSYS; break; } module_put(transport->owner); return err; } /* * Get message from skb. Each message is processed by iscsi_if_recv_msg. 
* Malformed skbs with wrong lengths or invalid creds are not processed. */ static void iscsi_if_rx(struct sk_buff *skb) { u32 portid = NETLINK_CB(skb).portid; mutex_lock(&rx_queue_mutex); while (skb->len >= NLMSG_HDRLEN) { int err; uint32_t rlen; struct nlmsghdr *nlh; struct iscsi_uevent *ev; uint32_t group; int retries = ISCSI_SEND_MAX_ALLOWED; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || skb->len < nlh->nlmsg_len) { break; } ev = nlmsg_data(nlh); rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; err = iscsi_if_recv_msg(skb, nlh, &group); if (err) { ev->type = ISCSI_KEVENT_IF_ERROR; ev->iferror = err; } do { /* * special case for GET_STATS, GET_CHAP and GET_HOST_STATS: * on success - sending reply and stats from * inside of if_recv_msg(), * on error - fall through. */ if (ev->type == ISCSI_UEVENT_GET_STATS && !err) break; if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) break; if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err) break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); if (err == -EAGAIN && --retries < 0) { printk(KERN_WARNING "Send reply failed, error %d\n", err); break; } } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } mutex_unlock(&rx_queue_mutex); } #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) /* * iSCSI connection attrs */ #define iscsi_conn_attr_show(param) \ static ssize_t \ show_conn_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ struct iscsi_transport *t = conn->transport; \ return t->get_conn_param(conn, param, buf); \ } #define iscsi_conn_attr(field, param) \ iscsi_conn_attr_show(param) \ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_param_##param, \ NULL); iscsi_conn_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH); iscsi_conn_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH); iscsi_conn_attr(header_digest, ISCSI_PARAM_HDRDGST_EN); iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN); iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN); iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN); iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT); iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN); iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT); iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN); iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO); iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE); iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT); iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE); iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE); iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE); iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN); iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE); iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS); iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC); iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL); iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6); iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF); iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF); 
iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR); static const char *const connection_state_names[] = { [ISCSI_CONN_UP] = "up", [ISCSI_CONN_DOWN] = "down", [ISCSI_CONN_FAILED] = "failed", [ISCSI_CONN_BOUND] = "bound" }; static ssize_t show_conn_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); const char *state = "unknown"; int conn_state = READ_ONCE(conn->state); if (conn_state >= 0 && conn_state < ARRAY_SIZE(connection_state_names)) state = connection_state_names[conn_state]; return sysfs_emit(buf, "%s\n", state); } static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state, NULL); #define iscsi_conn_ep_attr_show(param) \ static ssize_t show_conn_ep_param_##param(struct device *dev, \ struct device_attribute *attr,\ char *buf) \ { \ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ struct iscsi_transport *t = conn->transport; \ struct iscsi_endpoint *ep; \ ssize_t rc; \ \ /* \ * Need to make sure ep_disconnect does not free the LLD's \ * interconnect resources while we are trying to read them. \ */ \ mutex_lock(&conn->ep_mutex); \ ep = conn->ep; \ if (!ep && t->ep_connect) { \ mutex_unlock(&conn->ep_mutex); \ return -ENOTCONN; \ } \ \ if (ep) \ rc = t->get_ep_param(ep, param, buf); \ else \ rc = t->get_conn_param(conn, param, buf); \ mutex_unlock(&conn->ep_mutex); \ return rc; \ } #define iscsi_conn_ep_attr(field, param) \ iscsi_conn_ep_attr_show(param) \ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \ show_conn_ep_param_##param, NULL); iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS); iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT); static struct attribute *iscsi_conn_attrs[] = { &dev_attr_conn_max_recv_dlength.attr, &dev_attr_conn_max_xmit_dlength.attr, &dev_attr_conn_header_digest.attr, &dev_attr_conn_data_digest.attr, &dev_attr_conn_ifmarker.attr, &dev_attr_conn_ofmarker.attr, &dev_attr_conn_address.attr, &dev_attr_conn_port.attr, &dev_attr_conn_exp_statsn.attr, &dev_attr_conn_persistent_address.attr, &dev_attr_conn_persistent_port.attr, &dev_attr_conn_ping_tmo.attr, &dev_attr_conn_recv_tmo.attr, &dev_attr_conn_local_port.attr, &dev_attr_conn_statsn.attr, &dev_attr_conn_keepalive_tmo.attr, &dev_attr_conn_max_segment_size.attr, &dev_attr_conn_tcp_timestamp_stat.attr, &dev_attr_conn_tcp_wsf_disable.attr, &dev_attr_conn_tcp_nagle_disable.attr, &dev_attr_conn_tcp_timer_scale.attr, &dev_attr_conn_tcp_timestamp_enable.attr, &dev_attr_conn_fragment_disable.attr, &dev_attr_conn_ipv4_tos.attr, &dev_attr_conn_ipv6_traffic_class.attr, &dev_attr_conn_ipv6_flow_label.attr, &dev_attr_conn_is_fw_assigned_ipv6.attr, &dev_attr_conn_tcp_xmit_wsf.attr, &dev_attr_conn_tcp_recv_wsf.attr, &dev_attr_conn_local_ipaddr.attr, &dev_attr_conn_state.attr, NULL, }; static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct iscsi_cls_conn *conn = transport_class_to_conn(cdev); struct iscsi_transport *t = conn->transport; int param; if (attr == &dev_attr_conn_max_recv_dlength.attr) param = ISCSI_PARAM_MAX_RECV_DLENGTH; else if (attr == &dev_attr_conn_max_xmit_dlength.attr) param = ISCSI_PARAM_MAX_XMIT_DLENGTH; else if (attr == &dev_attr_conn_header_digest.attr) param = ISCSI_PARAM_HDRDGST_EN; else if (attr == &dev_attr_conn_data_digest.attr) param = ISCSI_PARAM_DATADGST_EN; else if (attr == &dev_attr_conn_ifmarker.attr) param = ISCSI_PARAM_IFMARKER_EN; else if (attr == &dev_attr_conn_ofmarker.attr) 
param = ISCSI_PARAM_OFMARKER_EN; else if (attr == &dev_attr_conn_address.attr) param = ISCSI_PARAM_CONN_ADDRESS; else if (attr == &dev_attr_conn_port.attr) param = ISCSI_PARAM_CONN_PORT; else if (attr == &dev_attr_conn_exp_statsn.attr) param = ISCSI_PARAM_EXP_STATSN; else if (attr == &dev_attr_conn_persistent_address.attr) param = ISCSI_PARAM_PERSISTENT_ADDRESS; else if (attr == &dev_attr_conn_persistent_port.attr) param = ISCSI_PARAM_PERSISTENT_PORT; else if (attr == &dev_attr_conn_ping_tmo.attr) param = ISCSI_PARAM_PING_TMO; else if (attr == &dev_attr_conn_recv_tmo.attr) param = ISCSI_PARAM_RECV_TMO; else if (attr == &dev_attr_conn_local_port.attr) param = ISCSI_PARAM_LOCAL_PORT; else if (attr == &dev_attr_conn_statsn.attr) param = ISCSI_PARAM_STATSN; else if (attr == &dev_attr_conn_keepalive_tmo.attr) param = ISCSI_PARAM_KEEPALIVE_TMO; else if (attr == &dev_attr_conn_max_segment_size.attr) param = ISCSI_PARAM_MAX_SEGMENT_SIZE; else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr) param = ISCSI_PARAM_TCP_TIMESTAMP_STAT; else if (attr == &dev_attr_conn_tcp_wsf_disable.attr) param = ISCSI_PARAM_TCP_WSF_DISABLE; else if (attr == &dev_attr_conn_tcp_nagle_disable.attr) param = ISCSI_PARAM_TCP_NAGLE_DISABLE; else if (attr == &dev_attr_conn_tcp_timer_scale.attr) param = ISCSI_PARAM_TCP_TIMER_SCALE; else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr) param = ISCSI_PARAM_TCP_TIMESTAMP_EN; else if (attr == &dev_attr_conn_fragment_disable.attr) param = ISCSI_PARAM_IP_FRAGMENT_DISABLE; else if (attr == &dev_attr_conn_ipv4_tos.attr) param = ISCSI_PARAM_IPV4_TOS; else if (attr == &dev_attr_conn_ipv6_traffic_class.attr) param = ISCSI_PARAM_IPV6_TC; else if (attr == &dev_attr_conn_ipv6_flow_label.attr) param = ISCSI_PARAM_IPV6_FLOW_LABEL; else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr) param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6; else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr) param = ISCSI_PARAM_TCP_XMIT_WSF; else if (attr == &dev_attr_conn_tcp_recv_wsf.attr) param = ISCSI_PARAM_TCP_RECV_WSF; else if (attr == &dev_attr_conn_local_ipaddr.attr) param = ISCSI_PARAM_LOCAL_IPADDR; else if (attr == &dev_attr_conn_state.attr) return S_IRUGO; else { WARN_ONCE(1, "Invalid conn attr"); return 0; } return t->attr_is_visible(ISCSI_PARAM, param); } static struct attribute_group iscsi_conn_group = { .attrs = iscsi_conn_attrs, .is_visible = iscsi_conn_attr_is_visible, }; /* * iSCSI session attrs */ #define iscsi_session_attr_show(param, perm) \ static ssize_t \ show_session_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ struct iscsi_transport *t = session->transport; \ \ if (perm && !capable(CAP_SYS_ADMIN)) \ return -EACCES; \ return t->get_session_param(session, param, buf); \ } #define iscsi_session_attr(field, param, perm) \ iscsi_session_attr_show(param, perm) \ static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \ NULL); iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0); iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0); iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0); iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0); iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0); iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0); iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0); iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0); iscsi_session_attr(erl, 
ISCSI_PARAM_ERL, 0); iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0); iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1); iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1); iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1); iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1); iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1); iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1); iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0); iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0); iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0); iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0); iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0); iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0); iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0); iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0); iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0); iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0); iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0); iscsi_session_attr(discovery_auth_optional, ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0); iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0); iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0); iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0); iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0); iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0); iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0); iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0); static ssize_t show_priv_session_target_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%s\n", iscsi_session_target_state_name[session->target_state]); } static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO, show_priv_session_target_state, NULL); static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); } static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, NULL); static ssize_t show_priv_session_creator(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%d\n", session->creator); } static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, NULL); static ssize_t show_priv_session_target_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); return sysfs_emit(buf, "%d\n", session->target_id); } static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, show_priv_session_target_id, NULL); #define iscsi_priv_session_attr_show(field, format) \ static ssize_t \ show_priv_session_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); 
\ if (session->field == -1) \ return sysfs_emit(buf, "off\n"); \ return sysfs_emit(buf, format"\n", session->field); \ } #define iscsi_priv_session_attr_store(field) \ static ssize_t \ store_priv_session_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ char *cp; \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ if ((session->state == ISCSI_SESSION_FREE) || \ (session->state == ISCSI_SESSION_FAILED)) \ return -EBUSY; \ if (strncmp(buf, "off", 3) == 0) { \ session->field = -1; \ session->field##_sysfs_override = true; \ } else { \ val = simple_strtoul(buf, &cp, 0); \ if (*cp != '\0' && *cp != '\n') \ return -EINVAL; \ session->field = val; \ session->field##_sysfs_override = true; \ } \ return count; \ } #define iscsi_priv_session_rw_attr(field, format) \ iscsi_priv_session_attr_show(field, format) \ iscsi_priv_session_attr_store(field) \ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ show_priv_session_##field, \ store_priv_session_##field) iscsi_priv_session_rw_attr(recovery_tmo, "%d"); static struct attribute *iscsi_session_attrs[] = { &dev_attr_sess_initial_r2t.attr, &dev_attr_sess_max_outstanding_r2t.attr, &dev_attr_sess_immediate_data.attr, &dev_attr_sess_first_burst_len.attr, &dev_attr_sess_max_burst_len.attr, &dev_attr_sess_data_pdu_in_order.attr, &dev_attr_sess_data_seq_in_order.attr, &dev_attr_sess_erl.attr, &dev_attr_sess_targetname.attr, &dev_attr_sess_tpgt.attr, &dev_attr_sess_password.attr, &dev_attr_sess_password_in.attr, &dev_attr_sess_username.attr, &dev_attr_sess_username_in.attr, &dev_attr_sess_fast_abort.attr, &dev_attr_sess_abort_tmo.attr, &dev_attr_sess_lu_reset_tmo.attr, &dev_attr_sess_tgt_reset_tmo.attr, &dev_attr_sess_ifacename.attr, &dev_attr_sess_initiatorname.attr, &dev_attr_sess_targetalias.attr, &dev_attr_sess_boot_root.attr, &dev_attr_sess_boot_nic.attr, &dev_attr_sess_boot_target.attr, &dev_attr_priv_sess_recovery_tmo.attr, &dev_attr_priv_sess_state.attr, &dev_attr_priv_sess_target_state.attr, &dev_attr_priv_sess_creator.attr, &dev_attr_sess_chap_out_idx.attr, &dev_attr_sess_chap_in_idx.attr, &dev_attr_priv_sess_target_id.attr, &dev_attr_sess_auto_snd_tgt_disable.attr, &dev_attr_sess_discovery_session.attr, &dev_attr_sess_portal_type.attr, &dev_attr_sess_chap_auth.attr, &dev_attr_sess_discovery_logout.attr, &dev_attr_sess_bidi_chap.attr, &dev_attr_sess_discovery_auth_optional.attr, &dev_attr_sess_def_time2wait.attr, &dev_attr_sess_def_time2retain.attr, &dev_attr_sess_isid.attr, &dev_attr_sess_tsid.attr, &dev_attr_sess_def_taskmgmt_tmo.attr, &dev_attr_sess_discovery_parent_idx.attr, &dev_attr_sess_discovery_parent_type.attr, NULL, }; static umode_t iscsi_session_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct iscsi_cls_session *session = transport_class_to_session(cdev); struct iscsi_transport *t = session->transport; int param; if (attr == &dev_attr_sess_initial_r2t.attr) param = ISCSI_PARAM_INITIAL_R2T_EN; else if (attr == &dev_attr_sess_max_outstanding_r2t.attr) param = ISCSI_PARAM_MAX_R2T; else if (attr == &dev_attr_sess_immediate_data.attr) param = ISCSI_PARAM_IMM_DATA_EN; else if (attr == &dev_attr_sess_first_burst_len.attr) param = ISCSI_PARAM_FIRST_BURST; else if (attr == &dev_attr_sess_max_burst_len.attr) param = ISCSI_PARAM_MAX_BURST; else if (attr == &dev_attr_sess_data_pdu_in_order.attr) param = ISCSI_PARAM_PDU_INORDER_EN; else if (attr == 
&dev_attr_sess_data_seq_in_order.attr) param = ISCSI_PARAM_DATASEQ_INORDER_EN; else if (attr == &dev_attr_sess_erl.attr) param = ISCSI_PARAM_ERL; else if (attr == &dev_attr_sess_targetname.attr) param = ISCSI_PARAM_TARGET_NAME; else if (attr == &dev_attr_sess_tpgt.attr) param = ISCSI_PARAM_TPGT; else if (attr == &dev_attr_sess_chap_in_idx.attr) param = ISCSI_PARAM_CHAP_IN_IDX; else if (attr == &dev_attr_sess_chap_out_idx.attr) param = ISCSI_PARAM_CHAP_OUT_IDX; else if (attr == &dev_attr_sess_password.attr) param = ISCSI_PARAM_USERNAME; else if (attr == &dev_attr_sess_password_in.attr) param = ISCSI_PARAM_USERNAME_IN; else if (attr == &dev_attr_sess_username.attr) param = ISCSI_PARAM_PASSWORD; else if (attr == &dev_attr_sess_username_in.attr) param = ISCSI_PARAM_PASSWORD_IN; else if (attr == &dev_attr_sess_fast_abort.attr) param = ISCSI_PARAM_FAST_ABORT; else if (attr == &dev_attr_sess_abort_tmo.attr) param = ISCSI_PARAM_ABORT_TMO; else if (attr == &dev_attr_sess_lu_reset_tmo.attr) param = ISCSI_PARAM_LU_RESET_TMO; else if (attr == &dev_attr_sess_tgt_reset_tmo.attr) param = ISCSI_PARAM_TGT_RESET_TMO; else if (attr == &dev_attr_sess_ifacename.attr) param = ISCSI_PARAM_IFACE_NAME; else if (attr == &dev_attr_sess_initiatorname.attr) param = ISCSI_PARAM_INITIATOR_NAME; else if (attr == &dev_attr_sess_targetalias.attr) param = ISCSI_PARAM_TARGET_ALIAS; else if (attr == &dev_attr_sess_boot_root.attr) param = ISCSI_PARAM_BOOT_ROOT; else if (attr == &dev_attr_sess_boot_nic.attr) param = ISCSI_PARAM_BOOT_NIC; else if (attr == &dev_attr_sess_boot_target.attr) param = ISCSI_PARAM_BOOT_TARGET; else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr) param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE; else if (attr == &dev_attr_sess_discovery_session.attr) param = ISCSI_PARAM_DISCOVERY_SESS; else if (attr == &dev_attr_sess_portal_type.attr) param = ISCSI_PARAM_PORTAL_TYPE; else if (attr == &dev_attr_sess_chap_auth.attr) param = ISCSI_PARAM_CHAP_AUTH_EN; else if (attr == &dev_attr_sess_discovery_logout.attr) param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN; else if (attr == &dev_attr_sess_bidi_chap.attr) param = ISCSI_PARAM_BIDI_CHAP_EN; else if (attr == &dev_attr_sess_discovery_auth_optional.attr) param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL; else if (attr == &dev_attr_sess_def_time2wait.attr) param = ISCSI_PARAM_DEF_TIME2WAIT; else if (attr == &dev_attr_sess_def_time2retain.attr) param = ISCSI_PARAM_DEF_TIME2RETAIN; else if (attr == &dev_attr_sess_isid.attr) param = ISCSI_PARAM_ISID; else if (attr == &dev_attr_sess_tsid.attr) param = ISCSI_PARAM_TSID; else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr) param = ISCSI_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_sess_discovery_parent_idx.attr) param = ISCSI_PARAM_DISCOVERY_PARENT_IDX; else if (attr == &dev_attr_sess_discovery_parent_type.attr) param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE; else if (attr == &dev_attr_priv_sess_recovery_tmo.attr) return S_IRUGO | S_IWUSR; else if (attr == &dev_attr_priv_sess_state.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_target_state.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_creator.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_target_id.attr) return S_IRUGO; else { WARN_ONCE(1, "Invalid session attr"); return 0; } return t->attr_is_visible(ISCSI_PARAM, param); } static struct attribute_group iscsi_session_group = { .attrs = iscsi_session_attrs, .is_visible = iscsi_session_attr_is_visible, }; /* * iSCSI host attrs */ #define iscsi_host_attr_show(param) \ static ssize_t \ 
show_host_param_##param(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct Scsi_Host *shost = transport_class_to_shost(dev); \ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \ return priv->iscsi_transport->get_host_param(shost, param, buf); \ } #define iscsi_host_attr(field, param) \ iscsi_host_attr_show(param) \ static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \ NULL); iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME); iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS); iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS); iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME); iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE); iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED); static struct attribute *iscsi_host_attrs[] = { &dev_attr_host_netdev.attr, &dev_attr_host_hwaddress.attr, &dev_attr_host_ipaddress.attr, &dev_attr_host_initiatorname.attr, &dev_attr_host_port_state.attr, &dev_attr_host_port_speed.attr, NULL, }; static umode_t iscsi_host_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct Scsi_Host *shost = transport_class_to_shost(cdev); struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); int param; if (attr == &dev_attr_host_netdev.attr) param = ISCSI_HOST_PARAM_NETDEV_NAME; else if (attr == &dev_attr_host_hwaddress.attr) param = ISCSI_HOST_PARAM_HWADDRESS; else if (attr == &dev_attr_host_ipaddress.attr) param = ISCSI_HOST_PARAM_IPADDRESS; else if (attr == &dev_attr_host_initiatorname.attr) param = ISCSI_HOST_PARAM_INITIATOR_NAME; else if (attr == &dev_attr_host_port_state.attr) param = ISCSI_HOST_PARAM_PORT_STATE; else if (attr == &dev_attr_host_port_speed.attr) param = ISCSI_HOST_PARAM_PORT_SPEED; else { WARN_ONCE(1, "Invalid host attr"); return 0; } return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param); } static struct attribute_group iscsi_host_group = { .attrs = iscsi_host_attrs, .is_visible = iscsi_host_attr_is_visible, }; /* convert iscsi_port_speed values to ascii string name */ static const struct { enum iscsi_port_speed value; char *name; } iscsi_port_speed_names[] = { {ISCSI_PORT_SPEED_UNKNOWN, "Unknown" }, {ISCSI_PORT_SPEED_10MBPS, "10 Mbps" }, {ISCSI_PORT_SPEED_100MBPS, "100 Mbps" }, {ISCSI_PORT_SPEED_1GBPS, "1 Gbps" }, {ISCSI_PORT_SPEED_10GBPS, "10 Gbps" }, {ISCSI_PORT_SPEED_25GBPS, "25 Gbps" }, {ISCSI_PORT_SPEED_40GBPS, "40 Gbps" }, }; char *iscsi_get_port_speed_name(struct Scsi_Host *shost) { int i; char *speed = "Unknown!"; struct iscsi_cls_host *ihost = shost->shost_data; uint32_t port_speed = ihost->port_speed; for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) { if (iscsi_port_speed_names[i].value & port_speed) { speed = iscsi_port_speed_names[i].name; break; } } return speed; } EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name); /* convert iscsi_port_state values to ascii string name */ static const struct { enum iscsi_port_state value; char *name; } iscsi_port_state_names[] = { {ISCSI_PORT_STATE_DOWN, "LINK DOWN" }, {ISCSI_PORT_STATE_UP, "LINK UP" }, }; char *iscsi_get_port_state_name(struct Scsi_Host *shost) { int i; char *state = "Unknown!"; struct iscsi_cls_host *ihost = shost->shost_data; uint32_t port_state = ihost->port_state; for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) { if (iscsi_port_state_names[i].value & port_state) { state = iscsi_port_state_names[i].name; break; } } return state; } 
EXPORT_SYMBOL_GPL(iscsi_get_port_state_name); static int iscsi_session_match(struct attribute_container *cont, struct device *dev) { struct iscsi_cls_session *session; struct Scsi_Host *shost; struct iscsi_internal *priv; if (!iscsi_is_session_dev(dev)) return 0; session = iscsi_dev_to_session(dev); shost = iscsi_session_to_shost(session); if (!shost->transportt) return 0; priv = to_iscsi_internal(shost->transportt); if (priv->session_cont.ac.class != &iscsi_session_class.class) return 0; return &priv->session_cont.ac == cont; } static int iscsi_conn_match(struct attribute_container *cont, struct device *dev) { struct iscsi_cls_session *session; struct iscsi_cls_conn *conn; struct Scsi_Host *shost; struct iscsi_internal *priv; if (!iscsi_is_conn_dev(dev)) return 0; conn = iscsi_dev_to_conn(dev); session = iscsi_dev_to_session(conn->dev.parent); shost = iscsi_session_to_shost(session); if (!shost->transportt) return 0; priv = to_iscsi_internal(shost->transportt); if (priv->conn_cont.ac.class != &iscsi_connection_class.class) return 0; return &priv->conn_cont.ac == cont; } static int iscsi_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct iscsi_internal *priv; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &iscsi_host_class.class) return 0; priv = to_iscsi_internal(shost->transportt); return &priv->t.host_attrs.ac == cont; } struct scsi_transport_template * iscsi_register_transport(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; int err; BUG_ON(!tt); WARN_ON(tt->ep_disconnect && !tt->unbind_conn); priv = iscsi_if_transport_lookup(tt); if (priv) return NULL; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return NULL; INIT_LIST_HEAD(&priv->list); priv->iscsi_transport = tt; priv->t.user_scan = iscsi_user_scan; priv->dev.class = &iscsi_transport_class; dev_set_name(&priv->dev, "%s", tt->name); err = device_register(&priv->dev); if (err) goto put_dev; err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group); if (err) goto unregister_dev; /* host parameters */ priv->t.host_attrs.ac.class = &iscsi_host_class.class; priv->t.host_attrs.ac.match = iscsi_host_match; priv->t.host_attrs.ac.grp = &iscsi_host_group; priv->t.host_size = sizeof(struct iscsi_cls_host); transport_container_register(&priv->t.host_attrs); /* connection parameters */ priv->conn_cont.ac.class = &iscsi_connection_class.class; priv->conn_cont.ac.match = iscsi_conn_match; priv->conn_cont.ac.grp = &iscsi_conn_group; transport_container_register(&priv->conn_cont); /* session parameters */ priv->session_cont.ac.class = &iscsi_session_class.class; priv->session_cont.ac.match = iscsi_session_match; priv->session_cont.ac.grp = &iscsi_session_group; transport_container_register(&priv->session_cont); spin_lock_irqsave(&iscsi_transport_lock, flags); list_add(&priv->list, &iscsi_transports); spin_unlock_irqrestore(&iscsi_transport_lock, flags); printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name); return &priv->t; unregister_dev: device_unregister(&priv->dev); return NULL; put_dev: put_device(&priv->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_register_transport); void iscsi_unregister_transport(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; BUG_ON(!tt); mutex_lock(&rx_queue_mutex); priv = iscsi_if_transport_lookup(tt); BUG_ON (!priv); spin_lock_irqsave(&iscsi_transport_lock, flags); list_del(&priv->list); 
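	/* From here on iscsi_if_transport_lookup() can no longer find it. */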
spin_unlock_irqrestore(&iscsi_transport_lock, flags); transport_container_unregister(&priv->conn_cont); transport_container_unregister(&priv->session_cont); transport_container_unregister(&priv->t.host_attrs); sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group); device_unregister(&priv->dev); mutex_unlock(&rx_queue_mutex); } EXPORT_SYMBOL_GPL(iscsi_unregister_transport); void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *), struct device *dev, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; trace(dev, &vaf); va_end(args); } EXPORT_SYMBOL_GPL(iscsi_dbg_trace); static __init int iscsi_transport_init(void) { int err; struct netlink_kernel_cfg cfg = { .groups = 1, .input = iscsi_if_rx, }; printk(KERN_INFO "Loading iSCSI transport class v%s.\n", ISCSI_TRANSPORT_VERSION); atomic_set(&iscsi_session_nr, 0); err = class_register(&iscsi_transport_class); if (err) return err; err = class_register(&iscsi_endpoint_class); if (err) goto unregister_transport_class; err = class_register(&iscsi_iface_class); if (err) goto unregister_endpoint_class; err = transport_class_register(&iscsi_host_class); if (err) goto unregister_iface_class; err = transport_class_register(&iscsi_connection_class); if (err) goto unregister_host_class; err = transport_class_register(&iscsi_session_class); if (err) goto unregister_conn_class; err = bus_register(&iscsi_flashnode_bus); if (err) goto unregister_session_class; nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg); if (!nls) { err = -ENOBUFS; goto unregister_flashnode_bus; } iscsi_conn_cleanup_workq = alloc_workqueue("%s", WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, "iscsi_conn_cleanup"); if (!iscsi_conn_cleanup_workq) { err = -ENOMEM; goto release_nls; } return 0; release_nls: netlink_kernel_release(nls); unregister_flashnode_bus: bus_unregister(&iscsi_flashnode_bus); unregister_session_class: transport_class_unregister(&iscsi_session_class); unregister_conn_class: transport_class_unregister(&iscsi_connection_class); unregister_host_class: transport_class_unregister(&iscsi_host_class); unregister_iface_class: class_unregister(&iscsi_iface_class); unregister_endpoint_class: class_unregister(&iscsi_endpoint_class); unregister_transport_class: class_unregister(&iscsi_transport_class); return err; } static void __exit iscsi_transport_exit(void) { destroy_workqueue(iscsi_conn_cleanup_workq); netlink_kernel_release(nls); bus_unregister(&iscsi_flashnode_bus); transport_class_unregister(&iscsi_connection_class); transport_class_unregister(&iscsi_session_class); transport_class_unregister(&iscsi_host_class); class_unregister(&iscsi_endpoint_class); class_unregister(&iscsi_iface_class); class_unregister(&iscsi_transport_class); } module_init(iscsi_transport_init); module_exit(iscsi_transport_exit); MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, " "Dmitry Yusupov <dmitry_yus@yahoo.com>, " "Alex Aizman <itn780@yahoo.com>"); MODULE_DESCRIPTION("iSCSI Transport Interface"); MODULE_LICENSE("GPL"); MODULE_VERSION(ISCSI_TRANSPORT_VERSION); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI); |
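The registration API above is what an iSCSI low-level driver calls at module load and unload. As a hedged illustration only (the transport name, the callback set, and the module boilerplate below are assumptions, not taken from any real driver), a minimal sketch might look like this:

/*
 * Minimal, hypothetical sketch of an LLD registering with the iSCSI
 * transport class.  A real driver fills in many more iscsi_transport
 * callbacks (session/connection management, get_host_param for the host
 * attributes shown above, attr_is_visible, etc.).
 */
#include <linux/module.h>
#include <scsi/scsi_transport_iscsi.h>

static struct scsi_transport_template *example_scsi_tmpl;

static struct iscsi_transport example_iscsi_transport = {
        .owner  = THIS_MODULE,
        .name   = "example_iscsi",
        /* .create_session, .get_host_param, .attr_is_visible, ... */
};

static int __init example_iscsi_init(void)
{
        example_scsi_tmpl = iscsi_register_transport(&example_iscsi_transport);
        if (!example_scsi_tmpl)
                return -ENODEV; /* already registered or allocation failure */
        return 0;
}

static void __exit example_iscsi_exit(void)
{
        iscsi_unregister_transport(&example_iscsi_transport);
}

module_init(example_iscsi_init);
module_exit(example_iscsi_exit);
MODULE_LICENSE("GPL");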
// SPDX-License-Identifier: GPL-2.0
/*
 * Filesystem-level keyring for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * This file implements management of fscrypt master keys in the
 * filesystem-level keyring, including the ioctls:
 *
 * - FS_IOC_ADD_ENCRYPTION_KEY
 * - FS_IOC_REMOVE_ENCRYPTION_KEY
 * - FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS
 * - FS_IOC_GET_ENCRYPTION_KEY_STATUS
 *
 * See the "User API" section of Documentation/filesystems/fscrypt.rst for more
 * information about these ioctls.
 */

#include <crypto/skcipher.h>
#include <linux/export.h>
#include <linux/key-type.h>
#include <linux/once.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/unaligned.h>

#include "fscrypt_private.h"

/* The master encryption keys for a filesystem (->s_master_keys) */
struct fscrypt_keyring {
        /*
         * Lock that protects ->key_hashtable.  It does *not* protect the
         * fscrypt_master_key structs themselves.
         */
        spinlock_t lock;

        /* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */
        struct hlist_head key_hashtable[128];
};

static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret)
{
        fscrypt_destroy_hkdf(&secret->hkdf);
        memzero_explicit(secret, sizeof(*secret));
}

static void move_master_key_secret(struct fscrypt_master_key_secret *dst,
                                   struct fscrypt_master_key_secret *src)
{
        memcpy(dst, src, sizeof(*dst));
        memzero_explicit(src, sizeof(*src));
}

static void fscrypt_free_master_key(struct rcu_head *head)
{
        struct fscrypt_master_key *mk =
                container_of(head, struct fscrypt_master_key, mk_rcu_head);

        /*
         * The master key secret and any embedded subkeys should have already
         * been wiped when the last active reference to the fscrypt_master_key
         * struct was dropped; doing it here would be unnecessarily late.
* Nevertheless, use kfree_sensitive() in case anything was missed. */ kfree_sensitive(mk); } void fscrypt_put_master_key(struct fscrypt_master_key *mk) { if (!refcount_dec_and_test(&mk->mk_struct_refs)) return; /* * No structural references left, so free ->mk_users, and also free the * fscrypt_master_key struct itself after an RCU grace period ensures * that concurrent keyring lookups can no longer find it. */ WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 0); if (mk->mk_users) { /* Clear the keyring so the quota gets released right away. */ keyring_clear(mk->mk_users); key_put(mk->mk_users); mk->mk_users = NULL; } call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key); } void fscrypt_put_master_key_activeref(struct super_block *sb, struct fscrypt_master_key *mk) { size_t i; if (!refcount_dec_and_test(&mk->mk_active_refs)) return; /* * No active references left, so complete the full removal of this * fscrypt_master_key struct by removing it from the keyring and * destroying any subkeys embedded in it. */ if (WARN_ON_ONCE(!sb->s_master_keys)) return; spin_lock(&sb->s_master_keys->lock); hlist_del_rcu(&mk->mk_node); spin_unlock(&sb->s_master_keys->lock); /* * ->mk_active_refs == 0 implies that ->mk_present is false and * ->mk_decrypted_inodes is empty. */ WARN_ON_ONCE(mk->mk_present); WARN_ON_ONCE(!list_empty(&mk->mk_decrypted_inodes)); for (i = 0; i <= FSCRYPT_MODE_MAX; i++) { fscrypt_destroy_prepared_key( sb, &mk->mk_direct_keys[i]); fscrypt_destroy_prepared_key( sb, &mk->mk_iv_ino_lblk_64_keys[i]); fscrypt_destroy_prepared_key( sb, &mk->mk_iv_ino_lblk_32_keys[i]); } memzero_explicit(&mk->mk_ino_hash_key, sizeof(mk->mk_ino_hash_key)); mk->mk_ino_hash_key_initialized = false; /* Drop the structural ref associated with the active refs. */ fscrypt_put_master_key(mk); } /* * This transitions the key state from present to incompletely removed, and then * potentially to absent (depending on whether inodes remain). */ static void fscrypt_initiate_key_removal(struct super_block *sb, struct fscrypt_master_key *mk) { WRITE_ONCE(mk->mk_present, false); wipe_master_key_secret(&mk->mk_secret); fscrypt_put_master_key_activeref(sb, mk); } static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) { if (spec->__reserved) return false; return master_key_spec_len(spec) != 0; } static int fscrypt_user_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { /* * We just charge FSCRYPT_MAX_RAW_KEY_SIZE bytes to the user's key quota * for each key, regardless of the exact key size. The amount of memory * actually used is greater than the size of the raw key anyway. */ return key_payload_reserve(key, FSCRYPT_MAX_RAW_KEY_SIZE); } static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); } /* * Type of key in ->mk_users. Each key of this type represents a particular * user who has added a particular master key. * * Note that the name of this key type really should be something like * ".fscrypt-user" instead of simply ".fscrypt". But the shorter name is chosen * mainly for simplicity of presentation in /proc/keys when read by a non-root * user. And it is expected to be rare that a key is actually added by multiple * users, since users should keep their encryption keys confidential. 
*/ static struct key_type key_type_fscrypt_user = { .name = ".fscrypt", .instantiate = fscrypt_user_key_instantiate, .describe = fscrypt_user_key_describe, }; #define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \ (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \ CONST_STRLEN("-users") + 1) #define FSCRYPT_MK_USER_DESCRIPTION_SIZE \ (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1) static void format_mk_users_keyring_description( char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE], const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { sprintf(description, "fscrypt-%*phN-users", FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier); } static void format_mk_user_description( char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE], const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { sprintf(description, "%*phN.uid.%u", FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier, __kuid_val(current_fsuid())); } /* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */ static int allocate_filesystem_keyring(struct super_block *sb) { struct fscrypt_keyring *keyring; if (sb->s_master_keys) return 0; keyring = kzalloc(sizeof(*keyring), GFP_KERNEL); if (!keyring) return -ENOMEM; spin_lock_init(&keyring->lock); /* * Pairs with the smp_load_acquire() in fscrypt_find_master_key(). * I.e., here we publish ->s_master_keys with a RELEASE barrier so that * concurrent tasks can ACQUIRE it. */ smp_store_release(&sb->s_master_keys, keyring); return 0; } /* * Release all encryption keys that have been added to the filesystem, along * with the keyring that contains them. * * This is called at unmount time, after all potentially-encrypted inodes have * been evicted. The filesystem's underlying block device(s) are still * available at this time; this is important because after user file accesses * have been allowed, this function may need to evict keys from the keyslots of * an inline crypto engine, which requires the block device(s). */ void fscrypt_destroy_keyring(struct super_block *sb) { struct fscrypt_keyring *keyring = sb->s_master_keys; size_t i; if (!keyring) return; for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) { struct hlist_head *bucket = &keyring->key_hashtable[i]; struct fscrypt_master_key *mk; struct hlist_node *tmp; hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) { /* * Since all potentially-encrypted inodes were already * evicted, every key remaining in the keyring should * have an empty inode list, and should only still be in * the keyring due to the single active ref associated * with ->mk_present. There should be no structural * refs beyond the one associated with the active ref. */ WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 1); WARN_ON_ONCE(refcount_read(&mk->mk_struct_refs) != 1); WARN_ON_ONCE(!mk->mk_present); fscrypt_initiate_key_removal(sb, mk); } } kfree_sensitive(keyring); sb->s_master_keys = NULL; } static struct hlist_head * fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring, const struct fscrypt_key_specifier *mk_spec) { /* * Since key specifiers should be "random" values, it is sufficient to * use a trivial hash function that just takes the first several bits of * the key specifier. */ unsigned long i = get_unaligned((unsigned long *)&mk_spec->u); return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)]; } /* * Find the specified master key struct in ->s_master_keys and take a structural * ref to it. 
The structural ref guarantees that the key struct continues to * exist, but it does *not* guarantee that ->s_master_keys continues to contain * the key struct. The structural ref needs to be dropped by * fscrypt_put_master_key(). Returns NULL if the key struct is not found. */ struct fscrypt_master_key * fscrypt_find_master_key(struct super_block *sb, const struct fscrypt_key_specifier *mk_spec) { struct fscrypt_keyring *keyring; struct hlist_head *bucket; struct fscrypt_master_key *mk; /* * Pairs with the smp_store_release() in allocate_filesystem_keyring(). * I.e., another task can publish ->s_master_keys concurrently, * executing a RELEASE barrier. We need to use smp_load_acquire() here * to safely ACQUIRE the memory the other task published. */ keyring = smp_load_acquire(&sb->s_master_keys); if (keyring == NULL) return NULL; /* No keyring yet, so no keys yet. */ bucket = fscrypt_mk_hash_bucket(keyring, mk_spec); rcu_read_lock(); switch (mk_spec->type) { case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: hlist_for_each_entry_rcu(mk, bucket, mk_node) { if (mk->mk_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && memcmp(mk->mk_spec.u.descriptor, mk_spec->u.descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && refcount_inc_not_zero(&mk->mk_struct_refs)) goto out; } break; case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: hlist_for_each_entry_rcu(mk, bucket, mk_node) { if (mk->mk_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && memcmp(mk->mk_spec.u.identifier, mk_spec->u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 && refcount_inc_not_zero(&mk->mk_struct_refs)) goto out; } break; } mk = NULL; out: rcu_read_unlock(); return mk; } static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE]; struct key *keyring; format_mk_users_keyring_description(description, mk->mk_spec.u.identifier); keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); mk->mk_users = keyring; return 0; } /* * Find the current user's "key" in the master key's ->mk_users. * Returns ERR_PTR(-ENOKEY) if not found. */ static struct key *find_master_key_user(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; key_ref_t keyref; format_mk_user_description(description, mk->mk_spec.u.identifier); /* * We need to mark the keyring reference as "possessed" so that we * acquire permission to search it, via the KEY_POS_SEARCH permission. */ keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/), &key_type_fscrypt_user, description, false); if (IS_ERR(keyref)) { if (PTR_ERR(keyref) == -EAGAIN || /* not found */ PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ keyref = ERR_PTR(-ENOKEY); return ERR_CAST(keyref); } return key_ref_to_ptr(keyref); } /* * Give the current user a "key" in ->mk_users. This charges the user's quota * and marks the master key as added by the current user, so that it cannot be * removed by another user with the key. Either ->mk_sem must be held for * write, or the master key must be still undergoing initialization. 
*/ static int add_master_key_user(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; struct key *mk_user; int err; format_mk_user_description(description, mk->mk_spec.u.identifier); mk_user = key_alloc(&key_type_fscrypt_user, description, current_fsuid(), current_gid(), current_cred(), KEY_POS_SEARCH | KEY_USR_VIEW, 0, NULL); if (IS_ERR(mk_user)) return PTR_ERR(mk_user); err = key_instantiate_and_link(mk_user, NULL, 0, mk->mk_users, NULL); key_put(mk_user); return err; } /* * Remove the current user's "key" from ->mk_users. * ->mk_sem must be held for write. * * Returns 0 if removed, -ENOKEY if not found, or another -errno code. */ static int remove_master_key_user(struct fscrypt_master_key *mk) { struct key *mk_user; int err; mk_user = find_master_key_user(mk); if (IS_ERR(mk_user)) return PTR_ERR(mk_user); err = key_unlink(mk->mk_users, mk_user); key_put(mk_user); return err; } /* * Allocate a new fscrypt_master_key, transfer the given secret over to it, and * insert it into sb->s_master_keys. */ static int add_new_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, const struct fscrypt_key_specifier *mk_spec) { struct fscrypt_keyring *keyring = sb->s_master_keys; struct fscrypt_master_key *mk; int err; mk = kzalloc(sizeof(*mk), GFP_KERNEL); if (!mk) return -ENOMEM; init_rwsem(&mk->mk_sem); refcount_set(&mk->mk_struct_refs, 1); mk->mk_spec = *mk_spec; INIT_LIST_HEAD(&mk->mk_decrypted_inodes); spin_lock_init(&mk->mk_decrypted_inodes_lock); if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { err = allocate_master_key_users_keyring(mk); if (err) goto out_put; err = add_master_key_user(mk); if (err) goto out_put; } move_master_key_secret(&mk->mk_secret, secret); mk->mk_present = true; refcount_set(&mk->mk_active_refs, 1); /* ->mk_present is true */ spin_lock(&keyring->lock); hlist_add_head_rcu(&mk->mk_node, fscrypt_mk_hash_bucket(keyring, mk_spec)); spin_unlock(&keyring->lock); return 0; out_put: fscrypt_put_master_key(mk); return err; } #define KEY_DEAD 1 static int add_existing_master_key(struct fscrypt_master_key *mk, struct fscrypt_master_key_secret *secret) { int err; /* * If the current user is already in ->mk_users, then there's nothing to * do. Otherwise, we need to add the user to ->mk_users. (Neither is * applicable for v1 policy keys, which have NULL ->mk_users.) */ if (mk->mk_users) { struct key *mk_user = find_master_key_user(mk); if (mk_user != ERR_PTR(-ENOKEY)) { if (IS_ERR(mk_user)) return PTR_ERR(mk_user); key_put(mk_user); return 0; } err = add_master_key_user(mk); if (err) return err; } /* If the key is incompletely removed, make it present again. */ if (!mk->mk_present) { if (!refcount_inc_not_zero(&mk->mk_active_refs)) { /* * Raced with the last active ref being dropped, so the * key has become, or is about to become, "absent". * Therefore, we need to allocate a new key struct. */ return KEY_DEAD; } move_master_key_secret(&mk->mk_secret, secret); WRITE_ONCE(mk->mk_present, true); } return 0; } static int do_add_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, const struct fscrypt_key_specifier *mk_spec) { static DEFINE_MUTEX(fscrypt_add_key_mutex); struct fscrypt_master_key *mk; int err; mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */ mk = fscrypt_find_master_key(sb, mk_spec); if (!mk) { /* Didn't find the key in ->s_master_keys. Add it. 
*/ err = allocate_filesystem_keyring(sb); if (!err) err = add_new_master_key(sb, secret, mk_spec); } else { /* * Found the key in ->s_master_keys. Add the user to ->mk_users * if needed, and make the key "present" again if possible. */ down_write(&mk->mk_sem); err = add_existing_master_key(mk, secret); up_write(&mk->mk_sem); if (err == KEY_DEAD) { /* * We found a key struct, but it's already been fully * removed. Ignore the old struct and add a new one. * fscrypt_add_key_mutex means we don't need to worry * about concurrent adds. */ err = add_new_master_key(sb, secret, mk_spec); } fscrypt_put_master_key(mk); } mutex_unlock(&fscrypt_add_key_mutex); return err; } static int add_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, struct fscrypt_key_specifier *key_spec) { int err; if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]; u8 *kdf_key = secret->bytes; unsigned int kdf_key_size = secret->size; u8 keyid_kdf_ctx = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY; /* * For raw keys, the fscrypt master key is used directly as the * fscrypt KDF key. For hardware-wrapped keys, we have to pass * the master key to the hardware to derive the KDF key, which * is then only used to derive non-file-contents subkeys. */ if (secret->is_hw_wrapped) { err = fscrypt_derive_sw_secret(sb, secret->bytes, secret->size, sw_secret); if (err) return err; kdf_key = sw_secret; kdf_key_size = sizeof(sw_secret); /* * To avoid weird behavior if someone manages to * determine sw_secret and add it as a raw key, ensure * that hardware-wrapped keys and raw keys will have * different key identifiers by deriving their key * identifiers using different KDF contexts. */ keyid_kdf_ctx = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY; } err = fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size); /* * Now that the KDF context is initialized, the raw KDF key is * no longer needed. */ memzero_explicit(kdf_key, kdf_key_size); if (err) return err; /* Calculate the key identifier */ err = fscrypt_hkdf_expand(&secret->hkdf, keyid_kdf_ctx, NULL, 0, key_spec->u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); if (err) return err; } return do_add_master_key(sb, secret, key_spec); } /* * Validate the size of an fscrypt master key being added. Note that this is * just an initial check, as we don't know which ciphers will be used yet. * There is a stricter size check later when the key is actually used by a file. */ static inline bool fscrypt_valid_key_size(size_t size, u32 add_key_flags) { u32 max_size = (add_key_flags & FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ? 
FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_RAW_KEY_SIZE; return size >= FSCRYPT_MIN_KEY_SIZE && size <= max_size; } static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) { const struct fscrypt_provisioning_key_payload *payload = prep->data; if (prep->datalen < sizeof(*payload)) return -EINVAL; if (!fscrypt_valid_key_size(prep->datalen - sizeof(*payload), payload->flags)) return -EINVAL; if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) return -EINVAL; if (payload->flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) return -EINVAL; prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL); if (!prep->payload.data[0]) return -ENOMEM; prep->quotalen = prep->datalen; return 0; } static void fscrypt_provisioning_key_free_preparse( struct key_preparsed_payload *prep) { kfree_sensitive(prep->payload.data[0]); } static void fscrypt_provisioning_key_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); if (key_is_positive(key)) { const struct fscrypt_provisioning_key_payload *payload = key->payload.data[0]; seq_printf(m, ": %u [%u]", key->datalen, payload->type); } } static void fscrypt_provisioning_key_destroy(struct key *key) { kfree_sensitive(key->payload.data[0]); } static struct key_type key_type_fscrypt_provisioning = { .name = "fscrypt-provisioning", .preparse = fscrypt_provisioning_key_preparse, .free_preparse = fscrypt_provisioning_key_free_preparse, .instantiate = generic_key_instantiate, .describe = fscrypt_provisioning_key_describe, .destroy = fscrypt_provisioning_key_destroy, }; /* * Retrieve the key from the Linux keyring key specified by 'key_id', and store * it into 'secret'. * * The key must be of type "fscrypt-provisioning" and must have the 'type' and * 'flags' field of the payload set to the given values, indicating that the key * is intended for use for the specified purpose. We don't use the "logon" key * type because there's no way to completely restrict the use of such keys; they * can be used by any kernel API that accepts "logon" keys and doesn't require a * specific service prefix. * * The ability to specify the key via Linux keyring key is intended for cases * where userspace needs to re-add keys after the filesystem is unmounted and * re-mounted. Most users should just provide the key directly instead. */ static int get_keyring_key(u32 key_id, u32 type, u32 flags, struct fscrypt_master_key_secret *secret) { key_ref_t ref; struct key *key; const struct fscrypt_provisioning_key_payload *payload; int err; ref = lookup_user_key(key_id, 0, KEY_NEED_SEARCH); if (IS_ERR(ref)) return PTR_ERR(ref); key = key_ref_to_ptr(ref); if (key->type != &key_type_fscrypt_provisioning) goto bad_key; payload = key->payload.data[0]; /* * Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. * Similarly, don't allow hardware-wrapped keys to be used as * non-hardware-wrapped keys and vice versa. */ if (payload->type != type || payload->flags != flags) goto bad_key; secret->size = key->datalen - sizeof(*payload); memcpy(secret->bytes, payload->raw, secret->size); err = 0; goto out_put; bad_key: err = -EKEYREJECTED; out_put: key_ref_put(ref); return err; } /* * Add a master encryption key to the filesystem, causing all files which were * encrypted with it to appear "unlocked" (decrypted) when accessed. * * When adding a key for use by v1 encryption policies, this ioctl is * privileged, and userspace must provide the 'key_descriptor'. 
* * When adding a key for use by v2+ encryption policies, this ioctl is * unprivileged. This is needed, in general, to allow non-root users to use * encryption without encountering the visibility problems of process-subscribed * keyrings and the inability to properly remove keys. This works by having * each key identified by its cryptographically secure hash --- the * 'key_identifier'. The cryptographic hash ensures that a malicious user * cannot add the wrong key for a given identifier. Furthermore, each added key * is charged to the appropriate user's quota for the keyrings service, which * prevents a malicious user from adding too many keys. Finally, we forbid a * user from removing a key while other users have added it too, which prevents * a user who knows another user's key from causing a denial-of-service by * removing it at an inopportune time. (We tolerate that a user who knows a key * can prevent other users from removing it.) * * For more details, see the "FS_IOC_ADD_ENCRYPTION_KEY" section of * Documentation/filesystems/fscrypt.rst. */ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_add_key_arg __user *uarg = _uarg; struct fscrypt_add_key_arg arg; struct fscrypt_master_key_secret secret; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; /* * Only root can add keys that are identified by an arbitrary descriptor * rather than by a cryptographic hash --- since otherwise a malicious * user could add the wrong key. */ if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && !capable(CAP_SYS_ADMIN)) return -EACCES; memset(&secret, 0, sizeof(secret)); if (arg.flags) { if (arg.flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) return -EINVAL; if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) return -EINVAL; secret.is_hw_wrapped = true; } if (arg.key_id) { if (arg.raw_size != 0) return -EINVAL; err = get_keyring_key(arg.key_id, arg.key_spec.type, arg.flags, &secret); if (err) goto out_wipe_secret; } else { if (!fscrypt_valid_key_size(arg.raw_size, arg.flags)) return -EINVAL; secret.size = arg.raw_size; err = -EFAULT; if (copy_from_user(secret.bytes, uarg->raw, secret.size)) goto out_wipe_secret; } err = add_master_key(sb, &secret, &arg.key_spec); if (err) goto out_wipe_secret; /* Return the key identifier to userspace, if applicable */ err = -EFAULT; if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && copy_to_user(uarg->key_spec.u.identifier, arg.key_spec.u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE)) goto out_wipe_secret; err = 0; out_wipe_secret: wipe_master_key_secret(&secret); return err; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); static void fscrypt_get_test_dummy_secret(struct fscrypt_master_key_secret *secret) { static u8 test_key[FSCRYPT_MAX_RAW_KEY_SIZE]; get_random_once(test_key, sizeof(test_key)); memset(secret, 0, sizeof(*secret)); secret->size = sizeof(test_key); memcpy(secret->bytes, test_key, sizeof(test_key)); } int fscrypt_get_test_dummy_key_identifier( u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { struct fscrypt_master_key_secret secret; int err; fscrypt_get_test_dummy_secret(&secret); err = fscrypt_init_hkdf(&secret.hkdf, secret.bytes, secret.size); if (err) goto out; err = fscrypt_hkdf_expand(&secret.hkdf, HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY, NULL, 0, key_identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); out: 
wipe_master_key_secret(&secret); return err; } /** * fscrypt_add_test_dummy_key() - add the test dummy encryption key * @sb: the filesystem instance to add the key to * @key_spec: the key specifier of the test dummy encryption key * * Add the key for the test_dummy_encryption mount option to the filesystem. To * prevent misuse of this mount option, a per-boot random key is used instead of * a hardcoded one. This makes it so that any encrypted files created using * this option won't be accessible after a reboot. * * Return: 0 on success, -errno on failure */ int fscrypt_add_test_dummy_key(struct super_block *sb, struct fscrypt_key_specifier *key_spec) { struct fscrypt_master_key_secret secret; int err; fscrypt_get_test_dummy_secret(&secret); err = add_master_key(sb, &secret, key_spec); wipe_master_key_secret(&secret); return err; } /* * Verify that the current user has added a master key with the given identifier * (returns -ENOKEY if not). This is needed to prevent a user from encrypting * their files using some other user's key which they don't actually know. * Cryptographically this isn't much of a problem, but the semantics of this * would be a bit weird, so it's best to just forbid it. * * The system administrator (CAP_FOWNER) can override this, which should be * enough for any use cases where encryption policies are being set using keys * that were chosen ahead of time but aren't available at the moment. * * Note that the key may have already removed by the time this returns, but * that's okay; we just care whether the key was there at some point. * * Return: 0 if the key is added, -ENOKEY if it isn't, or another -errno code */ int fscrypt_verify_key_added(struct super_block *sb, const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { struct fscrypt_key_specifier mk_spec; struct fscrypt_master_key *mk; struct key *mk_user; int err; mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); mk = fscrypt_find_master_key(sb, &mk_spec); if (!mk) { err = -ENOKEY; goto out; } down_read(&mk->mk_sem); mk_user = find_master_key_user(mk); if (IS_ERR(mk_user)) { err = PTR_ERR(mk_user); } else { key_put(mk_user); err = 0; } up_read(&mk->mk_sem); fscrypt_put_master_key(mk); out: if (err == -ENOKEY && capable(CAP_FOWNER)) err = 0; return err; } /* * Try to evict the inode's dentries from the dentry cache. If the inode is a * directory, then it can have at most one dentry; however, that dentry may be * pinned by child dentries, so first try to evict the children too. 
*/ static void shrink_dcache_inode(struct inode *inode) { struct dentry *dentry; if (S_ISDIR(inode->i_mode)) { dentry = d_find_any_alias(inode); if (dentry) { shrink_dcache_parent(dentry); dput(dentry); } } d_prune_aliases(inode); } static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk) { struct fscrypt_inode_info *ci; struct inode *inode; struct inode *toput_inode = NULL; spin_lock(&mk->mk_decrypted_inodes_lock); list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) { inode = ci->ci_inode; spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&mk->mk_decrypted_inodes_lock); shrink_dcache_inode(inode); iput(toput_inode); toput_inode = inode; spin_lock(&mk->mk_decrypted_inodes_lock); } spin_unlock(&mk->mk_decrypted_inodes_lock); iput(toput_inode); } static int check_for_busy_inodes(struct super_block *sb, struct fscrypt_master_key *mk) { struct list_head *pos; size_t busy_count = 0; unsigned long ino; char ino_str[50] = ""; spin_lock(&mk->mk_decrypted_inodes_lock); list_for_each(pos, &mk->mk_decrypted_inodes) busy_count++; if (busy_count == 0) { spin_unlock(&mk->mk_decrypted_inodes_lock); return 0; } { /* select an example file to show for debugging purposes */ struct inode *inode = list_first_entry(&mk->mk_decrypted_inodes, struct fscrypt_inode_info, ci_master_key_link)->ci_inode; ino = inode->i_ino; } spin_unlock(&mk->mk_decrypted_inodes_lock); /* If the inode is currently being created, ino may still be 0. */ if (ino) snprintf(ino_str, sizeof(ino_str), ", including ino %lu", ino); fscrypt_warn(NULL, "%s: %zu inode(s) still busy after removing key with %s %*phN%s", sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec), master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u, ino_str); return -EBUSY; } static int try_to_lock_encrypted_files(struct super_block *sb, struct fscrypt_master_key *mk) { int err1; int err2; /* * An inode can't be evicted while it is dirty or has dirty pages. * Thus, we first have to clean the inodes in ->mk_decrypted_inodes. * * Just do it the easy way: call sync_filesystem(). It's overkill, but * it works, and it's more important to minimize the amount of caches we * drop than the amount of data we sync. Also, unprivileged users can * already call sync_filesystem() via sys_syncfs() or sys_sync(). */ down_read(&sb->s_umount); err1 = sync_filesystem(sb); up_read(&sb->s_umount); /* If a sync error occurs, still try to evict as much as possible. */ /* * Inodes are pinned by their dentries, so we have to evict their * dentries. shrink_dcache_sb() would suffice, but would be overkill * and inappropriate for use by unprivileged users. So instead go * through the inodes' alias lists and try to evict each dentry. */ evict_dentries_for_decrypted_inodes(mk); /* * evict_dentries_for_decrypted_inodes() already iput() each inode in * the list; any inodes for which that dropped the last reference will * have been evicted due to fscrypt_drop_inode() detecting the key * removal and telling the VFS to evict the inode. So to finish, we * just need to check whether any inodes couldn't be evicted. */ err2 = check_for_busy_inodes(sb, mk); return err1 ?: err2; } /* * Try to remove an fscrypt master encryption key. * * FS_IOC_REMOVE_ENCRYPTION_KEY (all_users=false) removes the current user's * claim to the key, then removes the key itself if no other users have claims. 
* FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS (all_users=true) always removes the * key itself. * * To "remove the key itself", first we transition the key to the "incompletely * removed" state, so that no more inodes can be unlocked with it. Then we try * to evict all cached inodes that had been unlocked with the key. * * If all inodes were evicted, then we unlink the fscrypt_master_key from the * keyring. Otherwise it remains in the keyring in the "incompletely removed" * state where it tracks the list of remaining inodes. Userspace can execute * the ioctl again later to retry eviction, or alternatively can re-add the key. * * For more details, see the "Removing keys" section of * Documentation/filesystems/fscrypt.rst. */ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_remove_key_arg __user *uarg = _uarg; struct fscrypt_remove_key_arg arg; struct fscrypt_master_key *mk; u32 status_flags = 0; int err; bool inodes_remain; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; /* * Only root can add and remove keys that are identified by an arbitrary * descriptor rather than by a cryptographic hash. */ if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && !capable(CAP_SYS_ADMIN)) return -EACCES; /* Find the key being removed. */ mk = fscrypt_find_master_key(sb, &arg.key_spec); if (!mk) return -ENOKEY; down_write(&mk->mk_sem); /* If relevant, remove current user's (or all users) claim to the key */ if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) { if (all_users) err = keyring_clear(mk->mk_users); else err = remove_master_key_user(mk); if (err) { up_write(&mk->mk_sem); goto out_put_key; } if (mk->mk_users->keys.nr_leaves_on_tree != 0) { /* * Other users have still added the key too. We removed * the current user's claim to the key, but we still * can't remove the key itself. */ status_flags |= FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS; err = 0; up_write(&mk->mk_sem); goto out_put_key; } } /* No user claims remaining. Initiate removal of the key. */ err = -ENOKEY; if (mk->mk_present) { fscrypt_initiate_key_removal(sb, mk); err = 0; } inodes_remain = refcount_read(&mk->mk_active_refs) > 0; up_write(&mk->mk_sem); if (inodes_remain) { /* Some inodes still reference this key; try to evict them. */ err = try_to_lock_encrypted_files(sb, mk); if (err == -EBUSY) { status_flags |= FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY; err = 0; } } /* * We return 0 if we successfully did something: removed a claim to the * key, initiated removal of the key, or tried locking the files again. * Users need to check the informational status flags if they care * whether the key has been fully removed including all files locked. */ out_put_key: fscrypt_put_master_key(mk); if (err == 0) err = put_user(status_flags, &uarg->removal_status_flags); return err; } int fscrypt_ioctl_remove_key(struct file *filp, void __user *uarg) { return do_remove_key(filp, uarg, false); } EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key); int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *uarg) { if (!capable(CAP_SYS_ADMIN)) return -EACCES; return do_remove_key(filp, uarg, true); } EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users); /* * Retrieve the status of an fscrypt master encryption key. 
* * We set ->status to indicate whether the key is absent, present, or * incompletely removed. (For an explanation of what these statuses mean and * how they are represented internally, see struct fscrypt_master_key.) This * field allows applications to easily determine the status of an encrypted * directory without using a hack such as trying to open a regular file in it * (which can confuse the "incompletely removed" status with absent or present). * * In addition, for v2 policy keys we allow applications to determine, via * ->status_flags and ->user_count, whether the key has been added by the * current user, by other users, or by both. Most applications should not need * this, since ordinarily only one user should know a given key. However, if a * secret key is shared by multiple users, applications may wish to add an * already-present key to prevent other users from removing it. This ioctl can * be used to check whether that really is the case before the work is done to * add the key --- which might e.g. require prompting the user for a passphrase. * * For more details, see the "FS_IOC_GET_ENCRYPTION_KEY_STATUS" section of * Documentation/filesystems/fscrypt.rst. */ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_get_key_status_arg arg; struct fscrypt_master_key *mk; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; arg.status_flags = 0; arg.user_count = 0; memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved)); mk = fscrypt_find_master_key(sb, &arg.key_spec); if (!mk) { arg.status = FSCRYPT_KEY_STATUS_ABSENT; err = 0; goto out; } down_read(&mk->mk_sem); if (!mk->mk_present) { arg.status = refcount_read(&mk->mk_active_refs) > 0 ? FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED : FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */; err = 0; goto out_release_key; } arg.status = FSCRYPT_KEY_STATUS_PRESENT; if (mk->mk_users) { struct key *mk_user; arg.user_count = mk->mk_users->keys.nr_leaves_on_tree; mk_user = find_master_key_user(mk); if (!IS_ERR(mk_user)) { arg.status_flags |= FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF; key_put(mk_user); } else if (mk_user != ERR_PTR(-ENOKEY)) { err = PTR_ERR(mk_user); goto out_release_key; } } err = 0; out_release_key: up_read(&mk->mk_sem); fscrypt_put_master_key(mk); out: if (!err && copy_to_user(uarg, &arg, sizeof(arg))) err = -EFAULT; return err; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_key_status); int __init fscrypt_init_keyring(void) { int err; err = register_key_type(&key_type_fscrypt_user); if (err) return err; err = register_key_type(&key_type_fscrypt_provisioning); if (err) goto err_unregister_fscrypt_user; return 0; err_unregister_fscrypt_user: unregister_key_type(&key_type_fscrypt_user); return err; } |
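For reference, the ioctls implemented above are driven from user space roughly as follows. This is a hedged sketch, not documentation of a real tool: the helper name and the way the raw key is obtained are illustrative, and only the fields that fscrypt_ioctl_add_key() itself reads are filled in.

/*
 * Hypothetical user-space caller of FS_IOC_ADD_ENCRYPTION_KEY.  It adds a
 * raw key for v2 policies and prints the key identifier that the kernel
 * derives and copies back into key_spec.u.identifier.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

static int example_add_key(int dir_fd, const unsigned char *key, size_t key_size)
{
        struct fscrypt_add_key_arg *arg;
        int ret;

        arg = calloc(1, sizeof(*arg) + key_size);
        if (!arg)
                return -1;

        arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
        arg->raw_size = key_size;
        memcpy(arg->raw, key, key_size);

        ret = ioctl(dir_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
        if (ret == 0) {
                printf("key identifier: ");
                for (int i = 0; i < FSCRYPT_KEY_IDENTIFIER_SIZE; i++)
                        printf("%02x", arg->key_spec.u.identifier[i]);
                printf("\n");
        }

        /* Wipe the user-space copy of the key material before freeing it. */
        memset(arg, 0, sizeof(*arg) + key_size);
        free(arg);
        return ret;
}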
// SPDX-License-Identifier: GPL-2.0
/*
 * Wifi Band Exclusion Interface for WLAN
 * Copyright (C) 2023 Advanced Micro Devices
 * Copyright (C) 2025 Intel Corporation
 *
 */

#include <linux/acpi_amd_wbrf.h>
#include <linux/units.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"

void ieee80211_check_wbrf_support(struct ieee80211_local *local)
{
        struct wiphy *wiphy = local->hw.wiphy;
        struct device *dev;

        if (!wiphy)
                return;

        dev = wiphy->dev.parent;
        if (!dev)
                return;

        local->wbrf_supported = acpi_amd_wbrf_supported_producer(dev);
}

static void get_chan_freq_boundary(u32 center_freq, u32 bandwidth,
                                   u64 *start, u64 *end)
{
        bandwidth *= KHZ_PER_MHZ;
        center_freq *= KHZ_PER_MHZ;

        *start = center_freq - bandwidth / 2;
        *end = center_freq + bandwidth / 2;

        /* Frequency in Hz is expected */
        *start = *start * HZ_PER_KHZ;
        *end = *end * HZ_PER_KHZ;
}

static void get_ranges_from_chandef(struct cfg80211_chan_def *chandef,
                                    struct wbrf_ranges_in_out *ranges_in)
{
        u64 start_freq1, end_freq1;
        u64 start_freq2, end_freq2;
        int bandwidth;

        bandwidth = cfg80211_chandef_get_width(chandef);

        get_chan_freq_boundary(chandef->center_freq1, bandwidth,
                               &start_freq1, &end_freq1);

        ranges_in->band_list[0].start = start_freq1;
        ranges_in->band_list[0].end = end_freq1;
        ranges_in->num_of_ranges = 1;

        if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
                get_chan_freq_boundary(chandef->center_freq2, bandwidth,
                                       &start_freq2, &end_freq2);

                ranges_in->band_list[1].start = start_freq2;
                ranges_in->band_list[1].end = end_freq2;
                ranges_in->num_of_ranges++;
        }
}

void ieee80211_add_wbrf(struct ieee80211_local *local,
                        struct cfg80211_chan_def *chandef)
{
        struct wbrf_ranges_in_out ranges_in = {0};
        struct device *dev;

        if (!local->wbrf_supported)
                return;

        dev = local->hw.wiphy->dev.parent;

        get_ranges_from_chandef(chandef, &ranges_in);

        acpi_amd_wbrf_add_remove(dev, WBRF_RECORD_ADD, &ranges_in);
}

void ieee80211_remove_wbrf(struct ieee80211_local *local,
                           struct cfg80211_chan_def *chandef)
{
        struct wbrf_ranges_in_out ranges_in = {0};
        struct device *dev;

        if (!local->wbrf_supported)
                return;

        dev = local->hw.wiphy->dev.parent;

        get_ranges_from_chandef(chandef, &ranges_in);

        acpi_amd_wbrf_add_remove(dev, WBRF_RECORD_REMOVE, &ranges_in);
}
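As a quick sanity check of the conversion done by get_chan_freq_boundary() above, here is a standalone arithmetic version of the same MHz-to-Hz boundary math. The 80 MHz channel centered on 5180 MHz is purely an example value.

/*
 * Standalone (user-space) reproduction of the MHz -> kHz -> Hz boundary
 * math, using a hypothetical 80 MHz channel centered on 5180 MHz.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t center_khz = 5180ULL * 1000;                   /* MHz -> kHz */
        uint64_t bw_khz = 80ULL * 1000;
        uint64_t start_hz = (center_khz - bw_khz / 2) * 1000;   /* kHz -> Hz */
        uint64_t end_hz = (center_khz + bw_khz / 2) * 1000;

        /* Prints "5140000000 .. 5220000000" */
        printf("%llu .. %llu\n",
               (unsigned long long)start_hz, (unsigned long long)end_hz);
        return 0;
}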
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HID driver for THQ PS3 uDraw tablet
 *
 * Copyright (C) 2016 Red Hat Inc. All Rights Reserved
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"

MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_DESCRIPTION("PS3 uDraw tablet driver");
MODULE_LICENSE("GPL");

/*
 * Protocol information from:
 * https://brandonw.net/udraw/
 * and the source code of:
 * https://vvvv.org/contribution/udraw-hid
 */

/*
 * The device is set up with multiple input devices:
 * - the touch area which works as a touchpad
 * - the tablet area which works as a touchpad/drawing tablet
 * - a joypad with a d-pad, and 7 buttons
 * - an accelerometer device
 */

enum {
        TOUCH_NONE,
        TOUCH_PEN,
        TOUCH_FINGER,
        TOUCH_TWOFINGER
};

enum {
        AXIS_X,
        AXIS_Y,
        AXIS_Z
};

/*
 * Accelerometer min/max values
 * in order, X, Y and Z
 */
static struct {
        int min;
        int max;
} accel_limits[] = {
        [AXIS_X] = { 490, 534 },
        [AXIS_Y] = { 490, 534 },
        [AXIS_Z] = { 492, 536 }
};

#define DEVICE_NAME "THQ uDraw Game Tablet for PS3"
/* resolution in pixels */
#define RES_X 1920
#define RES_Y 1080
/* size in mm */
#define WIDTH  160
#define HEIGHT 90
#define PRESSURE_OFFSET 113
#define MAX_PRESSURE (255 - PRESSURE_OFFSET)

struct udraw {
        struct input_dev *joy_input_dev;
        struct input_dev *touch_input_dev;
        struct input_dev *pen_input_dev;
        struct input_dev *accel_input_dev;
        struct hid_device *hdev;

        /*
         * The device's two-finger support is pretty unreliable, as
         * the device could report a single touch when the two fingers
         * are too close together, and the distance between fingers, even
         * though reported, is not in the same unit as the touches.
         *
         * We'll make do without it, and try to report the first touch
         * as reliably as possible.
*/ int last_one_finger_x; int last_one_finger_y; int last_two_finger_x; int last_two_finger_y; }; static int clamp_accel(int axis, int offset) { axis = clamp(axis, accel_limits[offset].min, accel_limits[offset].max); axis = (axis - accel_limits[offset].min) / ((accel_limits[offset].max - accel_limits[offset].min) * 0xFF); return axis; } static int udraw_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int len) { struct udraw *udraw = hid_get_drvdata(hdev); int touch; int x, y, z; if (len != 27) return 0; if (data[11] == 0x00) touch = TOUCH_NONE; else if (data[11] == 0x40) touch = TOUCH_PEN; else if (data[11] == 0x80) touch = TOUCH_FINGER; else touch = TOUCH_TWOFINGER; /* joypad */ input_report_key(udraw->joy_input_dev, BTN_WEST, data[0] & 1); input_report_key(udraw->joy_input_dev, BTN_SOUTH, !!(data[0] & 2)); input_report_key(udraw->joy_input_dev, BTN_EAST, !!(data[0] & 4)); input_report_key(udraw->joy_input_dev, BTN_NORTH, !!(data[0] & 8)); input_report_key(udraw->joy_input_dev, BTN_SELECT, !!(data[1] & 1)); input_report_key(udraw->joy_input_dev, BTN_START, !!(data[1] & 2)); input_report_key(udraw->joy_input_dev, BTN_MODE, !!(data[1] & 16)); x = y = 0; switch (data[2]) { case 0x0: y = -127; break; case 0x1: y = -127; x = 127; break; case 0x2: x = 127; break; case 0x3: y = 127; x = 127; break; case 0x4: y = 127; break; case 0x5: y = 127; x = -127; break; case 0x6: x = -127; break; case 0x7: y = -127; x = -127; break; default: break; } input_report_abs(udraw->joy_input_dev, ABS_X, x); input_report_abs(udraw->joy_input_dev, ABS_Y, y); input_sync(udraw->joy_input_dev); /* For pen and touchpad */ x = y = 0; if (touch != TOUCH_NONE) { if (data[15] != 0x0F) x = data[15] * 256 + data[17]; if (data[16] != 0x0F) y = data[16] * 256 + data[18]; } if (touch == TOUCH_FINGER) { /* Save the last one-finger touch */ udraw->last_one_finger_x = x; udraw->last_one_finger_y = y; udraw->last_two_finger_x = -1; udraw->last_two_finger_y = -1; } else if (touch == TOUCH_TWOFINGER) { /* * We have a problem because x/y is the one for the * second finger but we want the first finger given * to user-space otherwise it'll look as if it jumped. * * See the udraw struct definition for why this was * implemented this way. 
*/ if (udraw->last_two_finger_x == -1) { /* Save the position of the 2nd finger */ udraw->last_two_finger_x = x; udraw->last_two_finger_y = y; x = udraw->last_one_finger_x; y = udraw->last_one_finger_y; } else { /* * Offset the 2-finger coords using the * saved data from the first finger */ x = x - (udraw->last_two_finger_x - udraw->last_one_finger_x); y = y - (udraw->last_two_finger_y - udraw->last_one_finger_y); } } /* touchpad */ if (touch == TOUCH_FINGER || touch == TOUCH_TWOFINGER) { input_report_key(udraw->touch_input_dev, BTN_TOUCH, 1); input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER, touch == TOUCH_FINGER); input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP, touch == TOUCH_TWOFINGER); input_report_abs(udraw->touch_input_dev, ABS_X, x); input_report_abs(udraw->touch_input_dev, ABS_Y, y); } else { input_report_key(udraw->touch_input_dev, BTN_TOUCH, 0); input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER, 0); input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP, 0); } input_sync(udraw->touch_input_dev); /* pen */ if (touch == TOUCH_PEN) { int level; level = clamp(data[13] - PRESSURE_OFFSET, 0, MAX_PRESSURE); input_report_key(udraw->pen_input_dev, BTN_TOUCH, (level != 0)); input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 1); input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, level); input_report_abs(udraw->pen_input_dev, ABS_X, x); input_report_abs(udraw->pen_input_dev, ABS_Y, y); } else { input_report_key(udraw->pen_input_dev, BTN_TOUCH, 0); input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 0); input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, 0); } input_sync(udraw->pen_input_dev); /* accel */ x = (data[19] + (data[20] << 8)); x = clamp_accel(x, AXIS_X); y = (data[21] + (data[22] << 8)); y = clamp_accel(y, AXIS_Y); z = (data[23] + (data[24] << 8)); z = clamp_accel(z, AXIS_Z); input_report_abs(udraw->accel_input_dev, ABS_X, x); input_report_abs(udraw->accel_input_dev, ABS_Y, y); input_report_abs(udraw->accel_input_dev, ABS_Z, z); input_sync(udraw->accel_input_dev); /* let hidraw and hiddev handle the report */ return 0; } static int udraw_open(struct input_dev *dev) { struct udraw *udraw = input_get_drvdata(dev); return hid_hw_open(udraw->hdev); } static void udraw_close(struct input_dev *dev) { struct udraw *udraw = input_get_drvdata(dev); hid_hw_close(udraw->hdev); } static struct input_dev *allocate_and_setup(struct hid_device *hdev, const char *name) { struct input_dev *input_dev; input_dev = devm_input_allocate_device(&hdev->dev); if (!input_dev) return NULL; input_dev->name = name; input_dev->phys = hdev->phys; input_dev->dev.parent = &hdev->dev; input_dev->open = udraw_open; input_dev->close = udraw_close; input_dev->uniq = hdev->uniq; input_dev->id.bustype = hdev->bus; input_dev->id.vendor = hdev->vendor; input_dev->id.product = hdev->product; input_dev->id.version = hdev->version; input_set_drvdata(input_dev, hid_get_drvdata(hdev)); return input_dev; } static bool udraw_setup_touch(struct udraw *udraw, struct hid_device *hdev) { struct input_dev *input_dev; input_dev = allocate_and_setup(hdev, DEVICE_NAME " Touchpad"); if (!input_dev) return false; input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY); input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0); input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH); input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0); input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT); set_bit(BTN_TOUCH, input_dev->keybit); set_bit(BTN_TOOL_FINGER, input_dev->keybit); set_bit(BTN_TOOL_DOUBLETAP, 
input_dev->keybit); set_bit(INPUT_PROP_POINTER, input_dev->propbit); udraw->touch_input_dev = input_dev; return true; } static bool udraw_setup_pen(struct udraw *udraw, struct hid_device *hdev) { struct input_dev *input_dev; input_dev = allocate_and_setup(hdev, DEVICE_NAME " Pen"); if (!input_dev) return false; input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY); input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0); input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH); input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0); input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT); input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_PRESSURE, 0, 0); set_bit(BTN_TOUCH, input_dev->keybit); set_bit(BTN_TOOL_PEN, input_dev->keybit); set_bit(INPUT_PROP_POINTER, input_dev->propbit); udraw->pen_input_dev = input_dev; return true; } static bool udraw_setup_accel(struct udraw *udraw, struct hid_device *hdev) { struct input_dev *input_dev; input_dev = allocate_and_setup(hdev, DEVICE_NAME " Accelerometer"); if (!input_dev) return false; input_dev->evbit[0] = BIT(EV_ABS); /* 1G accel is reported as ~256, so clamp to 2G */ input_set_abs_params(input_dev, ABS_X, -512, 512, 0, 0); input_set_abs_params(input_dev, ABS_Y, -512, 512, 0, 0); input_set_abs_params(input_dev, ABS_Z, -512, 512, 0, 0); set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit); udraw->accel_input_dev = input_dev; return true; } static bool udraw_setup_joypad(struct udraw *udraw, struct hid_device *hdev) { struct input_dev *input_dev; input_dev = allocate_and_setup(hdev, DEVICE_NAME " Joypad"); if (!input_dev) return false; input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); set_bit(BTN_SOUTH, input_dev->keybit); set_bit(BTN_NORTH, input_dev->keybit); set_bit(BTN_EAST, input_dev->keybit); set_bit(BTN_WEST, input_dev->keybit); set_bit(BTN_SELECT, input_dev->keybit); set_bit(BTN_START, input_dev->keybit); set_bit(BTN_MODE, input_dev->keybit); input_set_abs_params(input_dev, ABS_X, -127, 127, 0, 0); input_set_abs_params(input_dev, ABS_Y, -127, 127, 0, 0); udraw->joy_input_dev = input_dev; return true; } static int udraw_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct udraw *udraw; int ret; udraw = devm_kzalloc(&hdev->dev, sizeof(struct udraw), GFP_KERNEL); if (!udraw) return -ENOMEM; udraw->hdev = hdev; udraw->last_two_finger_x = -1; udraw->last_two_finger_y = -1; hid_set_drvdata(hdev, udraw); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } if (!udraw_setup_joypad(udraw, hdev) || !udraw_setup_touch(udraw, hdev) || !udraw_setup_pen(udraw, hdev) || !udraw_setup_accel(udraw, hdev)) { hid_err(hdev, "could not allocate interfaces\n"); return -ENOMEM; } ret = input_register_device(udraw->joy_input_dev) || input_register_device(udraw->touch_input_dev) || input_register_device(udraw->pen_input_dev) || input_register_device(udraw->accel_input_dev); if (ret) { hid_err(hdev, "failed to register interfaces\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW | HID_CONNECT_DRIVER); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } return 0; } static const struct hid_device_id udraw_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, { } }; MODULE_DEVICE_TABLE(hid, udraw_devices); static struct hid_driver udraw_driver = { .name = "hid-udraw", .id_table = udraw_devices, .raw_event = udraw_raw_event, .probe = udraw_probe, }; module_hid_driver(udraw_driver); |
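/*
 * Illustrative sketch only, not part of the driver above: the two-finger
 * handling in udraw_raw_event() reports coordinates anchored to the first
 * finger so the pointer does not jump when a second finger lands.  The
 * standalone userspace helper below reproduces that arithmetic with
 * hypothetical names (struct touch_state, anchor_two_finger) so it can be
 * compiled and tested outside the kernel.
 */
#include <stdio.h>

struct touch_state {
	int last_one_finger_x, last_one_finger_y; /* last single-finger point */
	int last_two_finger_x, last_two_finger_y; /* 2nd finger at touchdown, -1 = none */
};

/* Map a raw second-finger coordinate to the reported (anchored) coordinate. */
static void anchor_two_finger(struct touch_state *s, int *x, int *y)
{
	if (s->last_two_finger_x == -1) {
		/* First two-finger event: remember the 2nd finger, report the 1st. */
		s->last_two_finger_x = *x;
		s->last_two_finger_y = *y;
		*x = s->last_one_finger_x;
		*y = s->last_one_finger_y;
	} else {
		/* Later events: subtract the initial offset between the fingers. */
		*x -= s->last_two_finger_x - s->last_one_finger_x;
		*y -= s->last_two_finger_y - s->last_one_finger_y;
	}
}

int main(void)
{
	struct touch_state s = { 100, 100, -1, -1 };
	int x = 160, y = 130;		/* raw position of the 2nd finger */

	anchor_two_finger(&s, &x, &y);	/* reports 100,100: no jump */
	printf("%d %d\n", x, y);

	x = 170;
	y = 135;			/* 2nd finger moved by +10,+5 */
	anchor_two_finger(&s, &x, &y);	/* reports 110,105 */
	printf("%d %d\n", x, y);
	return 0;
}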
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#ifndef _LINUX_RCU_SYNC_H_
#define _LINUX_RCU_SYNC_H_

#include <linux/wait.h>
#include <linux/rcupdate.h>

/* Structure to mediate between updaters and fastpath-using readers. */
struct rcu_sync {
	int			gp_state;
	int			gp_count;
	wait_queue_head_t	gp_wait;

	struct rcu_head		cb_head;
};

/**
 * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Returns true if readers are permitted to use their fastpaths.  Must be
 * invoked within some flavor of RCU read-side critical section.
 */
static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
			 "suspicious rcu_sync_is_idle() usage");
	return !READ_ONCE(rsp->gp_state); /* GP_IDLE */
}

extern void rcu_sync_init(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);

#define __RCU_SYNC_INITIALIZER(name) {					\
		.gp_state = 0,						\
		.gp_count = 0,						\
		.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait),	\
	}

#define	DEFINE_RCU_SYNC(name)	\
	struct rcu_sync name = __RCU_SYNC_INITIALIZER(name)

#endif /* _LINUX_RCU_SYNC_H_ */
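/*
 * Illustrative sketch only, not part of rcu_sync.h: how an updater and a
 * fastpath-using reader are expected to pair up around this API.  The names
 * my_sync, my_reader(), my_updater(), my_fast_path(), my_slow_path() and
 * do_update() are hypothetical; the pattern mirrors how existing users
 * (e.g. percpu-rwsem) combine rcu_sync_is_idle() with an RCU read-side
 * critical section, per the kernel-doc comment above.
 */
#include <linux/rcu_sync.h>
#include <linux/rcupdate.h>

static DEFINE_RCU_SYNC(my_sync);

static void my_fast_path(void) { }
static void my_slow_path(void) { }
static void do_update(void) { }

/* Reader: may only trust the fastpath while no updater is active. */
static void my_reader(void)
{
	rcu_read_lock();
	if (rcu_sync_is_idle(&my_sync))
		my_fast_path();		/* no updater in flight */
	else
		my_slow_path();		/* fall back to the heavyweight path */
	rcu_read_unlock();
}

/* Updater: force readers onto the slow path around the update. */
static void my_updater(void)
{
	rcu_sync_enter(&my_sync);	/* waits for a grace period; new readers go slow */
	do_update();
	rcu_sync_exit(&my_sync);	/* readers may return to the fastpath */
}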
/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _HTT_H_
#define _HTT_H_

#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/hashtable.h>
#include <linux/kfifo.h>
#include <net/mac80211.h>

#include "htc.h"
#include "hw.h"
#include "rx_desc.h"

enum htt_dbg_stats_type {
	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
	HTT_DBG_STATS_RX_REORDER    = 1 << 1,
	HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
	HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
	HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
	/* bits 5-23 currently reserved */

	HTT_DBG_NUM_STATS /* keep this last */
};

enum htt_h2t_msg_type { /* host-to-target */
	HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
	HTT_H2T_MSG_TYPE_TX_FRM             = 1,
	HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
	HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
	HTT_H2T_MSG_TYPE_SYNC               = 4,
	HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,

	/* This command is used for sending management frames in HTT < 3.0.
	 * HTT >= 3.0 uses TX_FRM for everything.
	 */
	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
	HTT_H2T_MSG_TYPE_TX_FETCH_RESP      = 11,

	HTT_H2T_NUM_MSGS /* keep this last */
};

struct htt_cmd_hdr {
	u8 msg_type;
} __packed;

struct htt_ver_req {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
} __packed;

/*
 * HTT tx MSDU descriptor
 *
 * The HTT tx MSDU descriptor is created by the host HTT SW for each
 * tx MSDU.  The HTT tx MSDU descriptor contains the information that
 * the target firmware needs for the FW's tx processing, particularly
 * for creating the HW msdu descriptor.
 * The same HTT tx descriptor is used for HL and LL systems, though
 * a few fields within the tx descriptor are used only by LL or
 * only by HL.
 * The HTT tx descriptor is defined in two manners: by a struct with
 * bitfields, and by a series of [dword offset, bit mask, bit shift]
 * definitions.
 * The target should use the struct def, for simplicity and clarity,
 * but the host shall use the bit-mask + bit-shift defs, to be endian-
 * neutral.  Specifically, the host shall use the get/set macros built
 * around the mask + shift defs.
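 *
 * As an illustration only (HTT_EXAMPLE_FIELD_* is a hypothetical name, not
 * defined in this header), a mask + shift accessor pair typically looks like:
 *
 *	#define HTT_EXAMPLE_FIELD_MASK	0x00000fc0
 *	#define HTT_EXAMPLE_FIELD_LSB	6
 *	#define HTT_EXAMPLE_FIELD_GET(word) \
 *		(((word) & HTT_EXAMPLE_FIELD_MASK) >> HTT_EXAMPLE_FIELD_LSB)
 *	#define HTT_EXAMPLE_FIELD_SET(word, val) \
 *		(((word) & ~HTT_EXAMPLE_FIELD_MASK) | \
 *		 (((val) << HTT_EXAMPLE_FIELD_LSB) & HTT_EXAMPLE_FIELD_MASK))
 *
 * The real accessors below, e.g. HTT_TX_CREDIT_DELTA_ABS_GET(), follow the
 * same shape.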
*/ struct htt_data_tx_desc_frag { union { struct double_word_addr { __le32 paddr; __le32 len; } __packed dword_addr; struct triple_word_addr { __le32 paddr_lo; __le16 paddr_hi; __le16 len_16; } __packed tword_addr; } __packed; } __packed; struct htt_msdu_ext_desc { __le32 tso_flag[3]; __le16 ip_identification; u8 flags; u8 reserved; struct htt_data_tx_desc_frag frags[6]; }; struct htt_msdu_ext_desc_64 { __le32 tso_flag[5]; __le16 ip_identification; u8 flags; u8 reserved; struct htt_data_tx_desc_frag frags[6]; }; #define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0) #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1) #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2) #define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3) #define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4) #define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE) #define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 BIT(16) #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17) #define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18) #define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19) #define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20) #define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64 BIT(21) #define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64) enum htt_data_tx_desc_flags0 { HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2, HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3, HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4 #define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0 #define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5 }; enum htt_data_tx_desc_flags1 { #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6 #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0 #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5 #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0 #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6 HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11, HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12, HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13, HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14, HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE = 1 << 15 }; #define HTT_TX_CREDIT_DELTA_ABS_M 0xffff0000 #define HTT_TX_CREDIT_DELTA_ABS_S 16 #define HTT_TX_CREDIT_DELTA_ABS_GET(word) \ (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S) #define HTT_TX_CREDIT_SIGN_BIT_M 0x00000100 #define HTT_TX_CREDIT_SIGN_BIT_S 8 #define HTT_TX_CREDIT_SIGN_BIT_GET(word) \ (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S) enum htt_data_tx_ext_tid { HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16, HTT_DATA_TX_EXT_TID_MGMT = 17, HTT_DATA_TX_EXT_TID_INVALID = 31 }; #define HTT_INVALID_PEERID 0xFFFF /* * htt_data_tx_desc - used for data tx path * * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1. * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_ * for special kinds of tids * postponed: only for HL hosts. 
indicates if this is a resend * (HL hosts manage queues on the host ) * more_in_batch: only for HL hosts. indicates if more packets are * pending. this allows target to wait and aggregate * freq: 0 means home channel of given vdev. intended for offchannel */ struct htt_data_tx_desc { u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */ __le16 len; __le16 id; __le32 frags_paddr; union { __le32 peerid; struct { __le16 peerid; __le16 freq; } __packed offchan_tx; } __packed; u8 prefetch[0]; /* start of frame, for FW classification engine */ } __packed; struct htt_data_tx_desc_64 { u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */ __le16 len; __le16 id; __le64 frags_paddr; union { __le32 peerid; struct { __le16 peerid; __le16 freq; } __packed offchan_tx; } __packed; u8 prefetch[0]; /* start of frame, for FW classification engine */ } __packed; enum htt_rx_ring_flags { HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0, HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1, HTT_RX_RING_FLAGS_PPDU_START = 1 << 2, HTT_RX_RING_FLAGS_PPDU_END = 1 << 3, HTT_RX_RING_FLAGS_MPDU_START = 1 << 4, HTT_RX_RING_FLAGS_MPDU_END = 1 << 5, HTT_RX_RING_FLAGS_MSDU_START = 1 << 6, HTT_RX_RING_FLAGS_MSDU_END = 1 << 7, HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8, HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9, HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10, HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11, HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12, HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13, HTT_RX_RING_FLAGS_NULL_RX = 1 << 14, HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15 }; #define HTT_RX_RING_SIZE_MIN 128 #define HTT_RX_RING_SIZE_MAX 2048 #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1) #define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1) struct htt_rx_ring_rx_desc_offsets { /* the following offsets are in 4-byte units */ __le16 mac80211_hdr_offset; __le16 msdu_payload_offset; __le16 ppdu_start_offset; __le16 ppdu_end_offset; __le16 mpdu_start_offset; __le16 mpdu_end_offset; __le16 msdu_start_offset; __le16 msdu_end_offset; __le16 rx_attention_offset; __le16 frag_info_offset; } __packed; struct htt_rx_ring_setup_ring32 { __le32 fw_idx_shadow_reg_paddr; __le32 rx_ring_base_paddr; __le16 rx_ring_len; /* in 4-byte words */ __le16 rx_ring_bufsize; /* rx skb size - in bytes */ __le16 flags; /* %HTT_RX_RING_FLAGS_ */ __le16 fw_idx_init_val; struct htt_rx_ring_rx_desc_offsets offsets; } __packed; struct htt_rx_ring_setup_ring64 { __le64 fw_idx_shadow_reg_paddr; __le64 rx_ring_base_paddr; __le16 rx_ring_len; /* in 4-byte words */ __le16 rx_ring_bufsize; /* rx skb size - in bytes */ __le16 flags; /* %HTT_RX_RING_FLAGS_ */ __le16 fw_idx_init_val; struct htt_rx_ring_rx_desc_offsets offsets; } __packed; struct htt_rx_ring_setup_hdr { u8 num_rings; /* supported values: 1, 2 */ __le16 rsvd0; } __packed; struct htt_rx_ring_setup_32 { struct htt_rx_ring_setup_hdr hdr; struct htt_rx_ring_setup_ring32 rings[]; } __packed; struct htt_rx_ring_setup_64 { struct htt_rx_ring_setup_hdr hdr; struct htt_rx_ring_setup_ring64 rings[]; } __packed; /* * htt_stats_req - request target to send specified statistics * * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ * @upload_types: see %htt_dbg_stats_type. this is 24bit field actually * so make sure its little-endian. * @reset_types: see %htt_dbg_stats_type. this is 24bit field actually * so make sure its little-endian. 
* @cfg_val: stat_type specific configuration * @stat_type: see %htt_dbg_stats_type * @cookie_lsb: used for confirmation message from target->host * @cookie_msb: ditto as %cookie */ struct htt_stats_req { u8 upload_types[3]; u8 rsvd0; u8 reset_types[3]; struct { u8 mpdu_bytes; u8 mpdu_num_msdus; u8 msdu_bytes; } __packed; u8 stat_type; __le32 cookie_lsb; __le32 cookie_msb; } __packed; #define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff #define HTT_STATS_BIT_MASK GENMASK(16, 0) /* * htt_oob_sync_req - request out-of-band sync * * The HTT SYNC tells the target to suspend processing of subsequent * HTT host-to-target messages until some other target agent locally * informs the target HTT FW that the current sync counter is equal to * or greater than (in a modulo sense) the sync counter specified in * the SYNC message. * * This allows other host-target components to synchronize their operation * with HTT, e.g. to ensure that tx frames don't get transmitted until a * security key has been downloaded to and activated by the target. * In the absence of any explicit synchronization counter value * specification, the target HTT FW will use zero as the default current * sync value. * * The HTT target FW will suspend its host->target message processing as long * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128. */ struct htt_oob_sync_req { u8 sync_count; __le16 rsvd0; } __packed; struct htt_aggr_conf { u8 max_num_ampdu_subframes; /* amsdu_subframes is limited by 0x1F mask */ u8 max_num_amsdu_subframes; } __packed; struct htt_aggr_conf_v2 { u8 max_num_ampdu_subframes; /* amsdu_subframes is limited by 0x1F mask */ u8 max_num_amsdu_subframes; u8 reserved; } __packed; #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32 struct htt_mgmt_tx_desc_qca99x0 { __le32 rate; } __packed; struct htt_mgmt_tx_desc { u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)]; __le32 msdu_paddr; __le32 desc_id; __le32 len; __le32 vdev_id; u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN]; union { struct htt_mgmt_tx_desc_qca99x0 qca99x0; } __packed; } __packed; enum htt_mgmt_tx_status { HTT_MGMT_TX_STATUS_OK = 0, HTT_MGMT_TX_STATUS_RETRY = 1, HTT_MGMT_TX_STATUS_DROP = 2 }; /*=== target -> host messages ===============================================*/ enum htt_main_t2h_msg_type { HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0, HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1, HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2, HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3, HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4, HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5, HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6, HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8, HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9, HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb, HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe, HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf, HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10, HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11, HTT_MAIN_T2H_MSG_TYPE_TEST, /* keep this last */ HTT_MAIN_T2H_NUM_MSGS }; enum htt_10x_t2h_msg_type { HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0, HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1, HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2, HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3, HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4, HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5, HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6, HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8, HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9, HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb, 
HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, HTT_10X_T2H_MSG_TYPE_TEST = 0xe, HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf, HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11, HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12, HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13, /* keep this last */ HTT_10X_T2H_NUM_MSGS }; enum htt_tlv_t2h_msg_type { HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0, HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1, HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2, HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3, HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4, HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5, HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6, HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8, HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9, HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb, HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */ HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe, HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf, HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10, HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11, HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12, /* 0x13 reservd */ HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14, HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15, HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16, HTT_TLV_T2H_MSG_TYPE_TEST, /* keep this last */ HTT_TLV_T2H_NUM_MSGS }; enum htt_10_4_t2h_msg_type { HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0, HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1, HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2, HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3, HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4, HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5, HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6, HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8, HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9, HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb, HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe, HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf, HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10, HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11, HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12, HTT_10_4_T2H_MSG_TYPE_TEST = 0x13, HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14, HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15, HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16, HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17, HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18, /* 0x19 to 0x2f are reserved */ HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30, HTT_10_4_T2H_MSG_TYPE_PEER_STATS = 0x31, /* keep this last */ HTT_10_4_T2H_NUM_MSGS }; enum htt_t2h_msg_type { HTT_T2H_MSG_TYPE_VERSION_CONF, HTT_T2H_MSG_TYPE_RX_IND, HTT_T2H_MSG_TYPE_RX_FLUSH, HTT_T2H_MSG_TYPE_PEER_MAP, HTT_T2H_MSG_TYPE_PEER_UNMAP, HTT_T2H_MSG_TYPE_RX_ADDBA, HTT_T2H_MSG_TYPE_RX_DELBA, HTT_T2H_MSG_TYPE_TX_COMPL_IND, HTT_T2H_MSG_TYPE_PKTLOG, HTT_T2H_MSG_TYPE_STATS_CONF, HTT_T2H_MSG_TYPE_RX_FRAG_IND, HTT_T2H_MSG_TYPE_SEC_IND, HTT_T2H_MSG_TYPE_RC_UPDATE_IND, HTT_T2H_MSG_TYPE_TX_INSPECT_IND, HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION, HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND, HTT_T2H_MSG_TYPE_RX_PN_IND, HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND, HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND, HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE, HTT_T2H_MSG_TYPE_CHAN_CHANGE, HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR, HTT_T2H_MSG_TYPE_AGGR_CONF, HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, HTT_T2H_MSG_TYPE_TEST, HTT_T2H_MSG_TYPE_EN_STATS, HTT_T2H_MSG_TYPE_TX_FETCH_IND, HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, 
HTT_T2H_MSG_TYPE_PEER_STATS, /* keep this last */ HTT_T2H_NUM_MSGS }; /* * htt_resp_hdr - header for target-to-host messages * * msg_type: see htt_t2h_msg_type */ struct htt_resp_hdr { u8 msg_type; } __packed; #define HTT_RESP_HDR_MSG_TYPE_OFFSET 0 #define HTT_RESP_HDR_MSG_TYPE_MASK 0xff #define HTT_RESP_HDR_MSG_TYPE_LSB 0 /* htt_ver_resp - response sent for htt_ver_req */ struct htt_ver_resp { u8 minor; u8 major; u8 rsvd0; } __packed; #define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0) #define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0) struct htt_mgmt_tx_completion { u8 rsvd0; u8 rsvd1; u8 flags; __le32 desc_id; __le32 status; __le32 ppdu_id; __le32 info; } __packed; #define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F) #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) #define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5) #define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6) #define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7) #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 #define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0 #define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6 #define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000 #define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12 #define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000 #define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24 #define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0) #define HTT_TX_CMPL_FLAG_PPID_PRESENT BIT(1) #define HTT_TX_CMPL_FLAG_PA_PRESENT BIT(2) #define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3) #define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3) #define HTT_TX_DATA_APPEND_RETRIES BIT(0) #define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1) struct htt_rx_indication_hdr { u8 info0; /* %HTT_RX_INDICATION_INFO0_ */ __le16 peer_id; __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */ } __packed; #define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0) #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E) #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1) #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5) #define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6) #define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7) #define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF #define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0 #define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000 #define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24 #define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF #define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB 0 #define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000 #define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24 enum htt_rx_legacy_rate { HTT_RX_OFDM_48 = 0, HTT_RX_OFDM_24 = 1, HTT_RX_OFDM_12, HTT_RX_OFDM_6, HTT_RX_OFDM_54, HTT_RX_OFDM_36, HTT_RX_OFDM_18, HTT_RX_OFDM_9, /* long preamble */ HTT_RX_CCK_11_LP = 0, HTT_RX_CCK_5_5_LP = 1, HTT_RX_CCK_2_LP, HTT_RX_CCK_1_LP, /* short preamble */ HTT_RX_CCK_11_SP, HTT_RX_CCK_5_5_SP, HTT_RX_CCK_2_SP }; enum htt_rx_legacy_rate_type { HTT_RX_LEGACY_RATE_OFDM = 0, HTT_RX_LEGACY_RATE_CCK }; enum htt_rx_preamble_type { HTT_RX_LEGACY = 0x4, HTT_RX_HT = 0x8, HTT_RX_HT_WITH_TXBF = 0x9, HTT_RX_VHT = 0xC, HTT_RX_VHT_WITH_TXBF = 0xD, }; /* * Fields: phy_err_valid, phy_err_code, tsf, * usec_timestamp, sub_usec_timestamp * ..are valid only if end_valid == 1. 
* * Fields: rssi_chains, legacy_rate_type, * legacy_rate_cck, preamble_type, service, * vht_sig_* * ..are valid only if start_valid == 1; */ struct htt_rx_indication_ppdu { u8 combined_rssi; u8 sub_usec_timestamp; u8 phy_err_code; u8 info0; /* HTT_RX_INDICATION_INFO0_ */ struct { u8 pri20_db; u8 ext20_db; u8 ext40_db; u8 ext80_db; } __packed rssi_chains[4]; __le32 tsf; __le32 usec_timestamp; __le32 info1; /* HTT_RX_INDICATION_INFO1_ */ __le32 info2; /* HTT_RX_INDICATION_INFO2_ */ } __packed; enum htt_rx_mpdu_status { HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0, HTT_RX_IND_MPDU_STATUS_OK, HTT_RX_IND_MPDU_STATUS_ERR_FCS, HTT_RX_IND_MPDU_STATUS_ERR_DUP, HTT_RX_IND_MPDU_STATUS_ERR_REPLAY, HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER, /* only accept EAPOL frames */ HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER, HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC, /* Non-data in promiscuous mode */ HTT_RX_IND_MPDU_STATUS_MGMT_CTRL, HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR, HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR, HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR, HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR, HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR, /* * MISC: discard for unspecified reasons. * Leave this enum value last. */ HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF }; struct htt_rx_indication_mpdu_range { u8 mpdu_count; u8 mpdu_range_status; /* %htt_rx_mpdu_status */ u8 pad0; u8 pad1; } __packed; struct htt_rx_indication_prefix { __le16 fw_rx_desc_bytes; u8 pad0; u8 pad1; } __packed; struct htt_rx_indication { struct htt_rx_indication_hdr hdr; struct htt_rx_indication_ppdu ppdu; struct htt_rx_indication_prefix prefix; /* * the following fields are both dynamically sized, so * take care addressing them */ /* the size of this is %fw_rx_desc_bytes */ struct fw_rx_desc_base fw_desc; /* * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4) * and has %num_mpdu_ranges elements. */ struct htt_rx_indication_mpdu_range mpdu_ranges[]; } __packed; /* High latency version of the RX indication */ struct htt_rx_indication_hl { struct htt_rx_indication_hdr hdr; struct htt_rx_indication_ppdu ppdu; struct htt_rx_indication_prefix prefix; struct fw_rx_desc_hl fw_desc; struct htt_rx_indication_mpdu_range mpdu_ranges[]; } __packed; struct htt_hl_rx_desc { __le32 info; __le32 pn_31_0; union { struct { __le16 pn_47_32; __le16 pn_63_48; } pn16; __le32 pn_63_32; } u0; __le32 pn_95_64; __le32 pn_127_96; } __packed; static inline struct htt_rx_indication_mpdu_range * htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind) { void *ptr = rx_ind; ptr += sizeof(rx_ind->hdr) + sizeof(rx_ind->ppdu) + sizeof(rx_ind->prefix) + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4); return ptr; } static inline struct htt_rx_indication_mpdu_range * htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind) { void *ptr = rx_ind; ptr += sizeof(rx_ind->hdr) + sizeof(rx_ind->ppdu) + sizeof(rx_ind->prefix) + sizeof(rx_ind->fw_desc); return ptr; } enum htt_rx_flush_mpdu_status { HTT_RX_FLUSH_MPDU_DISCARD = 0, HTT_RX_FLUSH_MPDU_REORDER = 1, }; /* * htt_rx_flush - discard or reorder given range of mpdus * * Note: host must check if all sequence numbers between * [seq_num_start, seq_num_end-1] are valid. 
*/ struct htt_rx_flush { __le16 peer_id; u8 tid; u8 rsvd0; u8 mpdu_status; /* %htt_rx_flush_mpdu_status */ u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */ u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */ }; struct htt_rx_peer_map { u8 vdev_id; __le16 peer_id; u8 addr[6]; u8 rsvd0; u8 rsvd1; } __packed; struct htt_rx_peer_unmap { u8 rsvd0; __le16 peer_id; } __packed; enum htt_txrx_sec_cast_type { HTT_TXRX_SEC_MCAST = 0, HTT_TXRX_SEC_UCAST }; enum htt_rx_pn_check_type { HTT_RX_NON_PN_CHECK = 0, HTT_RX_PN_CHECK }; enum htt_rx_tkip_demic_type { HTT_RX_NON_TKIP_MIC = 0, HTT_RX_TKIP_MIC }; enum htt_security_types { HTT_SECURITY_NONE, HTT_SECURITY_WEP128, HTT_SECURITY_WEP104, HTT_SECURITY_WEP40, HTT_SECURITY_TKIP, HTT_SECURITY_TKIP_NOMIC, HTT_SECURITY_AES_CCMP, HTT_SECURITY_WAPI, HTT_NUM_SECURITY_TYPES /* keep this last! */ }; #define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2 #define ATH10K_TXRX_NUM_EXT_TIDS 19 #define ATH10K_TXRX_NON_QOS_TID 16 enum htt_security_flags { #define HTT_SECURITY_TYPE_MASK 0x7F #define HTT_SECURITY_TYPE_LSB 0 HTT_SECURITY_IS_UNICAST = 1 << 7 }; struct htt_security_indication { union { /* dont use bitfields; undefined behaviour */ u8 flags; /* %htt_security_flags */ struct { u8 security_type:7, /* %htt_security_types */ is_unicast:1; } __packed; } __packed; __le16 peer_id; u8 michael_key[8]; u8 wapi_rsc[16]; } __packed; #define HTT_RX_BA_INFO0_TID_MASK 0x000F #define HTT_RX_BA_INFO0_TID_LSB 0 #define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0 #define HTT_RX_BA_INFO0_PEER_ID_LSB 4 struct htt_rx_addba { u8 window_size; __le16 info0; /* %HTT_RX_BA_INFO0_ */ } __packed; struct htt_rx_delba { u8 rsvd0; __le16 info0; /* %HTT_RX_BA_INFO0_ */ } __packed; enum htt_data_tx_status { HTT_DATA_TX_STATUS_OK = 0, HTT_DATA_TX_STATUS_DISCARD = 1, HTT_DATA_TX_STATUS_NO_ACK = 2, HTT_DATA_TX_STATUS_POSTPONE = 3 /* HL only */ }; enum htt_data_tx_flags { #define HTT_DATA_TX_STATUS_MASK 0x07 #define HTT_DATA_TX_STATUS_LSB 0 #define HTT_DATA_TX_TID_MASK 0x78 #define HTT_DATA_TX_TID_LSB 3 HTT_DATA_TX_TID_INVALID = 1 << 7 }; #define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF struct htt_append_retries { __le16 msdu_id; u8 tx_retries; u8 flag; } __packed; struct htt_data_tx_completion_ext { struct htt_append_retries a_retries; __le32 t_stamp; __le16 msdus_rssi[]; } __packed; /* * @brief target -> host TX completion indication message definition * * @details * The following diagram shows the format of the TX completion indication sent * from the target to the host * * |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0| * |-------------------------------------------------------------| * header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type | * |-------------------------------------------------------------| * payload: | MSDU1 ID | MSDU0 ID | * |-------------------------------------------------------------| * : MSDU3 ID : MSDU2 ID : * |-------------------------------------------------------------| * | struct htt_tx_compl_ind_append_retries | * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| * | struct htt_tx_compl_ind_append_tx_tstamp | * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| * | MSDU1 ACK RSSI | MSDU0 ACK RSSI | * |-------------------------------------------------------------| * : MSDU3 ACK RSSI : MSDU2 ACK RSSI : * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| * -msg_type * Bits 7:0 * Purpose: identifies this as HTT TX completion indication * -status * Bits 10:8 * Purpose: the TX completion status of payload fragmentations descriptors * Value: could be 
HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD * -tid * Bits 14:11 * Purpose: the tid associated with those fragmentation descriptors. It is * valid or not, depending on the tid_invalid bit. * Value: 0 to 15 * -tid_invalid * Bits 15:15 * Purpose: this bit indicates whether the tid field is valid or not * Value: 0 indicates valid, 1 indicates invalid * -num * Bits 23:16 * Purpose: the number of payload in this indication * Value: 1 to 255 * -A0 = append * Bits 24:24 * Purpose: append the struct htt_tx_compl_ind_append_retries which contains * the number of tx retries for one MSDU at the end of this message * Value: 0 indicates no appending, 1 indicates appending * -A1 = append1 * Bits 25:25 * Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which * contains the timestamp info for each TX msdu id in payload. * Value: 0 indicates no appending, 1 indicates appending * -TP = MSDU tx power presence * Bits 26:26 * Purpose: Indicate whether the TX_COMPL_IND includes a tx power report * for each MSDU referenced by the TX_COMPL_IND message. * The order of the per-MSDU tx power reports matches the order * of the MSDU IDs. * Value: 0 indicates not appending, 1 indicates appending * -A2 = append2 * Bits 27:27 * Purpose: Indicate whether data ACK RSSI is appended for each MSDU in * TX_COMP_IND message. The order of the per-MSDU ACK RSSI report * matches the order of the MSDU IDs. * The ACK RSSI values are valid when status is COMPLETE_OK (and * this append2 bit is set). * Value: 0 indicates not appending, 1 indicates appending */ struct htt_data_tx_completion { union { u8 flags; struct { u8 status:3, tid:4, tid_invalid:1; } __packed; } __packed; u8 num_msdus; u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */ __le16 msdus[]; /* variable length based on %num_msdus */ } __packed; #define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0) #define HTT_TX_PPDU_DUR_INFO0_TID_MASK GENMASK(20, 16) struct htt_data_tx_ppdu_dur { __le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */ __le32 tx_duration; /* in usecs */ } __packed; #define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK GENMASK(7, 0) struct htt_data_tx_compl_ppdu_dur { __le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */ struct htt_data_tx_ppdu_dur ppdu_dur[]; } __packed; struct htt_tx_compl_ind_base { u32 hdr; u16 payload[1/*or more*/]; } __packed; struct htt_rc_tx_done_params { u32 rate_code; u32 rate_code_flags; u32 flags; u32 num_enqued; /* 1 for non-AMPDU */ u32 num_retries; u32 num_failed; /* for AMPDU */ u32 ack_rssi; u32 time_stamp; u32 is_probe; }; struct htt_rc_update { u8 vdev_id; __le16 peer_id; u8 addr[6]; u8 num_elems; u8 rsvd0; struct htt_rc_tx_done_params params[]; /* variable length %num_elems */ } __packed; /* see htt_rx_indication for similar fields and descriptions */ struct htt_rx_fragment_indication { union { u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */ struct { u8 ext_tid:5, flush_valid:1; } __packed; } __packed; __le16 peer_id; __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */ __le16 fw_rx_desc_bytes; __le16 rsvd0; u8 fw_msdu_rx_desc[]; } __packed; #define ATH10K_IEEE80211_EXTIV BIT(5) #define ATH10K_IEEE80211_TKIP_MICLEN 8 /* trailing MIC */ #define HTT_RX_FRAG_IND_INFO0_HEADER_LEN 16 #define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F #define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0 #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20 #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0 #define 
HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6 struct htt_rx_pn_ind { __le16 peer_id; u8 tid; u8 seqno_start; u8 seqno_end; u8 pn_ie_count; u8 reserved; u8 pn_ies[]; } __packed; struct htt_rx_offload_msdu { __le16 msdu_len; __le16 peer_id; u8 vdev_id; u8 tid; u8 fw_desc; u8 payload[]; } __packed; struct htt_rx_offload_ind { u8 reserved; __le16 msdu_count; } __packed; struct htt_rx_in_ord_msdu_desc { __le32 msdu_paddr; __le16 msdu_len; u8 fw_desc; u8 reserved; } __packed; struct htt_rx_in_ord_msdu_desc_ext { __le64 msdu_paddr; __le16 msdu_len; u8 fw_desc; u8 reserved; } __packed; struct htt_rx_in_ord_ind { u8 info; __le16 peer_id; u8 vdev_id; u8 reserved; __le16 msdu_count; union { DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc, msdu_descs32); DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc_ext, msdu_descs64); } __packed; } __packed; #define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f #define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0 #define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020 #define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5 #define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040 #define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6 /* * target -> host test message definition * * The following field definitions describe the format of the test * message sent from the target to the host. * The message consists of a 4-octet header, followed by a variable * number of 32-bit integer values, followed by a variable number * of 8-bit character values. * * |31 16|15 8|7 0| * |-----------------------------------------------------------| * | num chars | num ints | msg type | * |-----------------------------------------------------------| * | int 0 | * |-----------------------------------------------------------| * | int 1 | * |-----------------------------------------------------------| * | ... | * |-----------------------------------------------------------| * | char 3 | char 2 | char 1 | char 0 | * |-----------------------------------------------------------| * | | | ... | char 4 | * |-----------------------------------------------------------| * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a test message * Value: HTT_MSG_TYPE_TEST * - NUM_INTS * Bits 15:8 * Purpose: indicate how many 32-bit integers follow the message header * - NUM_CHARS * Bits 31:16 * Purpose: indicate how many 8-bit characters follow the series of integers */ struct htt_rx_test { u8 num_ints; __le16 num_chars; /* payload consists of 2 lists: * a) num_ints * sizeof(__le32) * b) num_chars * sizeof(u8) aligned to 4bytes */ u8 payload[]; } __packed; static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test) { return (__le32 *)rx_test->payload; } static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test) { return rx_test->payload + (rx_test->num_ints * sizeof(__le32)); } /* * target -> host packet log message * * The following field definitions describe the format of the packet log * message sent from the target to the host. * The message consists of a 4-octet header,followed by a variable number * of 32-bit character values. 
* * |31 24|23 16|15 8|7 0| * |-----------------------------------------------------------| * | | | | msg type | * |-----------------------------------------------------------| * | payload | * |-----------------------------------------------------------| * - MSG_TYPE * Bits 7:0 * Purpose: identifies this as a test message * Value: HTT_MSG_TYPE_PACKETLOG */ struct htt_pktlog_msg { u8 pad[3]; u8 payload[]; } __packed; struct htt_dbg_stats_rx_reorder_stats { /* Non QoS MPDUs received */ __le32 deliver_non_qos; /* MPDUs received in-order */ __le32 deliver_in_order; /* Flush due to reorder timer expired */ __le32 deliver_flush_timeout; /* Flush due to move out of window */ __le32 deliver_flush_oow; /* Flush due to DELBA */ __le32 deliver_flush_delba; /* MPDUs dropped due to FCS error */ __le32 fcs_error; /* MPDUs dropped due to monitor mode non-data packet */ __le32 mgmt_ctrl; /* MPDUs dropped due to invalid peer */ __le32 invalid_peer; /* MPDUs dropped due to duplication (non aggregation) */ __le32 dup_non_aggr; /* MPDUs dropped due to processed before */ __le32 dup_past; /* MPDUs dropped due to duplicate in reorder queue */ __le32 dup_in_reorder; /* Reorder timeout happened */ __le32 reorder_timeout; /* invalid bar ssn */ __le32 invalid_bar_ssn; /* reorder reset due to bar ssn */ __le32 ssn_reset; }; struct htt_dbg_stats_wal_tx_stats { /* Num HTT cookies queued to dispatch list */ __le32 comp_queued; /* Num HTT cookies dispatched */ __le32 comp_delivered; /* Num MSDU queued to WAL */ __le32 msdu_enqued; /* Num MPDU queue to WAL */ __le32 mpdu_enqued; /* Num MSDUs dropped by WMM limit */ __le32 wmm_drop; /* Num Local frames queued */ __le32 local_enqued; /* Num Local frames done */ __le32 local_freed; /* Num queued to HW */ __le32 hw_queued; /* Num PPDU reaped from HW */ __le32 hw_reaped; /* Num underruns */ __le32 underrun; /* Num PPDUs cleaned up in TX abort */ __le32 tx_abort; /* Num MPDUs requeued by SW */ __le32 mpdus_requeued; /* excessive retries */ __le32 tx_ko; /* data hw rate code */ __le32 data_rc; /* Scheduler self triggers */ __le32 self_triggers; /* frames dropped due to excessive sw retries */ __le32 sw_retry_failure; /* illegal rate phy errors */ __le32 illgl_rate_phy_err; /* wal pdev continuous xretry */ __le32 pdev_cont_xretry; /* wal pdev continuous xretry */ __le32 pdev_tx_timeout; /* wal pdev resets */ __le32 pdev_resets; __le32 phy_underrun; /* MPDU is more than txop limit */ __le32 txop_ovf; } __packed; struct htt_dbg_stats_wal_rx_stats { /* Cnts any change in ring routing mid-ppdu */ __le32 mid_ppdu_route_change; /* Total number of statuses processed */ __le32 status_rcvd; /* Extra frags on rings 0-3 */ __le32 r0_frags; __le32 r1_frags; __le32 r2_frags; __le32 r3_frags; /* MSDUs / MPDUs delivered to HTT */ __le32 htt_msdus; __le32 htt_mpdus; /* MSDUs / MPDUs delivered to local stack */ __le32 loc_msdus; __le32 loc_mpdus; /* AMSDUs that have more MSDUs than the status ring size */ __le32 oversize_amsdu; /* Number of PHY errors */ __le32 phy_errs; /* Number of PHY errors drops */ __le32 phy_err_drop; /* Number of mpdu errors - FCS, MIC, ENC etc. 
*/ __le32 mpdu_errs; } __packed; struct htt_dbg_stats_wal_peer_stats { __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ } __packed; struct htt_dbg_stats_wal_pdev_txrx { struct htt_dbg_stats_wal_tx_stats tx_stats; struct htt_dbg_stats_wal_rx_stats rx_stats; struct htt_dbg_stats_wal_peer_stats peer_stats; } __packed; struct htt_dbg_stats_rx_rate_info { __le32 mcs[10]; __le32 sgi[10]; __le32 nss[4]; __le32 stbc[10]; __le32 bw[3]; __le32 pream[6]; __le32 ldpc; __le32 txbf; }; /* * htt_dbg_stats_status - * present - The requested stats have been delivered in full. * This indicates that either the stats information was contained * in its entirety within this message, or else this message * completes the delivery of the requested stats info that was * partially delivered through earlier STATS_CONF messages. * partial - The requested stats have been delivered in part. * One or more subsequent STATS_CONF messages with the same * cookie value will be sent to deliver the remainder of the * information. * error - The requested stats could not be delivered, for example due * to a shortage of memory to construct a message holding the * requested stats. * invalid - The requested stat type is either not recognized, or the * target is configured to not gather the stats type in question. * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * series_done - This special value indicates that no further stats info * elements are present within a series of stats info elems * (within a stats upload confirmation message). */ enum htt_dbg_stats_status { HTT_DBG_STATS_STATUS_PRESENT = 0, HTT_DBG_STATS_STATUS_PARTIAL = 1, HTT_DBG_STATS_STATUS_ERROR = 2, HTT_DBG_STATS_STATUS_INVALID = 3, HTT_DBG_STATS_STATUS_SERIES_DONE = 7 }; /* * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank * * The following field definitions describe the format of the HTT host * to target frag_desc/msdu_ext bank configuration message. * The message contains the based address and the min and max id of the * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and * MSDU_EXT/FRAG_DESC. * HTT will use id in HTT descriptor instead sending the frag_desc_ptr. * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0 * the hardware does the mapping/translation. * * Total banks that can be configured is configured to 16. * * This should be called before any TX has be initiated by the HTT * * |31 16|15 8|7 5|4 0| * |------------------------------------------------------------| * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type | * |------------------------------------------------------------| * | BANK0_BASE_ADDRESS | * |------------------------------------------------------------| * | ... | * |------------------------------------------------------------| * | BANK15_BASE_ADDRESS | * |------------------------------------------------------------| * | BANK0_MAX_ID | BANK0_MIN_ID | * |------------------------------------------------------------| * | ... | * |------------------------------------------------------------| * | BANK15_MAX_ID | BANK15_MIN_ID | * |------------------------------------------------------------| * Header fields: * - MSG_TYPE * Bits 7:0 * Value: 0x6 * - BANKx_BASE_ADDRESS * Bits 31:0 * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT * bank physical/bus address. * - BANKx_MIN_ID * Bits 15:0 * Purpose: Provide a mechanism to specify the min index that needs to * mapped. 
* - BANKx_MAX_ID * Bits 31:16 * Purpose: Provide a mechanism to specify the max index that needs to * */ struct htt_frag_desc_bank_id { __le16 bank_min_id; __le16 bank_max_id; } __packed; /* real is 16 but it wouldn't fit in the max htt message size * so we use a conservatively safe value for now */ #define HTT_FRAG_DESC_BANK_MAX 4 #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 #define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2) #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3) #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4) #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4 enum htt_q_depth_type { HTT_Q_DEPTH_TYPE_BYTES = 0, HTT_Q_DEPTH_TYPE_MSDUS = 1, }; #define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \ TARGET_10_4_NUM_VDEVS) #define HTT_TX_Q_STATE_NUM_TIDS 8 #define HTT_TX_Q_STATE_ENTRY_SIZE 1 #define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0 /** * struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config * * Defines host q state format and behavior. See htt_q_state. * * @paddr: Queue physical address * @num_peers: Number of supported peers * @num_tids: Number of supported TIDs * @record_size: Defines the size of each host q entry in bytes. In practice * however firmware (at least 10.4.3-00191) ignores this host * configuration value and uses hardcoded value of 1. * @record_multiplier: This is valid only when q depth type is MSDUs. It * defines the exponent for the power of 2 multiplication. * @pad: struct padding for 32-bit alignment */ struct htt_q_state_conf { __le32 paddr; __le16 num_peers; __le16 num_tids; u8 record_size; u8 record_multiplier; u8 pad[2]; } __packed; struct htt_frag_desc_bank_cfg32 { u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ u8 num_banks; u8 desc_size; __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; struct htt_q_state_conf q_state; } __packed; struct htt_frag_desc_bank_cfg64 { u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ u8 num_banks; u8 desc_size; __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; struct htt_q_state_conf q_state; } __packed; #define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128 #define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f #define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0 #define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0 #define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6 /** * struct htt_q_state - shared between host and firmware via DMA * * This structure is used for the host to expose it's software queue state to * firmware so that its rate control can schedule fetch requests for optimized * performance. This is most notably used for MU-MIMO aggregation when multiple * MU clients are connected. * * @count: Each element defines the host queue depth. When q depth type was * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as: * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 ** * record_multiplier (see htt_q_state_conf). * @map: Used by firmware to quickly check which host queues are not empty. It * is a bitmap simply saying. * @seq: Used by firmware to quickly check if the host queues were updated * since it last checked. * * FIXME: Is the q_state map[] size calculation really correct? 
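 *
 * Worked example (illustrative only, derived from the FACTOR * 128 * 8^EXP
 * formula and the masks above, not from firmware documentation): with
 * HTT_Q_DEPTH_TYPE_BYTES, a @count byte of 0x52 decodes as FACTOR = 0x12
 * (18) and EXP = 1, i.e. an advertised queue depth of 18 * 128 * 8 =
 * 18432 bytes.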
*/ struct htt_q_state { u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS]; u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32]; __le32 seq; } __packed; #define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff #define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0 #define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000 #define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12 struct htt_tx_fetch_record { __le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */ __le16 num_msdus; __le32 num_bytes; } __packed; struct htt_tx_fetch_ind { u8 pad0; __le16 fetch_seq_num; __le32 token; __le16 num_resp_ids; __le16 num_records; union { /* ath10k_htt_get_tx_fetch_ind_resp_ids() */ DECLARE_FLEX_ARRAY(__le32, resp_ids); DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records); } __packed; } __packed; static inline void * ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind) { return (void *)&ind->records[le16_to_cpu(ind->num_records)]; } struct htt_tx_fetch_resp { u8 pad0; __le16 resp_id; __le16 fetch_seq_num; __le16 num_records; __le32 token; struct htt_tx_fetch_record records[]; } __packed; struct htt_tx_fetch_confirm { u8 pad0; __le16 num_resp_ids; __le32 resp_ids[]; } __packed; enum htt_tx_mode_switch_mode { HTT_TX_MODE_SWITCH_PUSH = 0, HTT_TX_MODE_SWITCH_PUSH_PULL = 1, }; #define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0) #define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe #define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1 #define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003 #define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0 #define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc #define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2 #define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff #define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0 #define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000 #define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12 struct htt_tx_mode_switch_record { __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */ __le16 num_max_msdus; } __packed; struct htt_tx_mode_switch_ind { u8 pad0; __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */ __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */ u8 pad1[2]; struct htt_tx_mode_switch_record records[]; } __packed; struct htt_channel_change { u8 pad[3]; __le32 freq; __le32 center_freq1; __le32 center_freq2; __le32 phymode; } __packed; struct htt_per_peer_tx_stats_ind { __le32 succ_bytes; __le32 retry_bytes; __le32 failed_bytes; u8 ratecode; u8 flags; __le16 peer_id; __le16 succ_pkts; __le16 retry_pkts; __le16 failed_pkts; __le16 tx_duration; __le32 reserved1; __le32 reserved2; } __packed; struct htt_peer_tx_stats { u8 num_ppdu; u8 ppdu_len; u8 version; u8 payload[]; } __packed; #define ATH10K_10_2_TX_STATS_OFFSET 136 #define PEER_STATS_FOR_NO_OF_PPDUS 4 struct ath10k_10_2_peer_tx_stats { u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS]; u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS]; __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS]; u8 flags[PEER_STATS_FOR_NO_OF_PPDUS]; __le32 tx_duration; u8 tx_ppdu_cnt; u8 peer_id; } __packed; union htt_rx_pn_t { /* WEP: 24-bit PN */ u32 pn24; /* TKIP or CCMP: 48-bit PN */ u64 pn48; /* WAPI: 128-bit PN */ u64 pn128[2]; }; struct htt_cmd { struct htt_cmd_hdr hdr; union { struct htt_ver_req ver_req; struct htt_mgmt_tx_desc mgmt_tx; struct htt_data_tx_desc data_tx; struct htt_rx_ring_setup_32 rx_setup_32; struct 
htt_rx_ring_setup_64 rx_setup_64; struct htt_stats_req stats_req; struct htt_oob_sync_req oob_sync_req; struct htt_aggr_conf aggr_conf; struct htt_aggr_conf_v2 aggr_conf_v2; struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32; struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64; struct htt_tx_fetch_resp tx_fetch_resp; }; } __packed; struct htt_resp { struct htt_resp_hdr hdr; union { struct htt_ver_resp ver_resp; struct htt_mgmt_tx_completion mgmt_tx_completion; struct htt_data_tx_completion data_tx_completion; struct htt_rx_indication rx_ind; struct htt_rx_indication_hl rx_ind_hl; struct htt_rx_fragment_indication rx_frag_ind; struct htt_rx_peer_map peer_map; struct htt_rx_peer_unmap peer_unmap; struct htt_rx_flush rx_flush; struct htt_rx_addba rx_addba; struct htt_rx_delba rx_delba; struct htt_security_indication security_indication; struct htt_rc_update rc_update; struct htt_rx_test rx_test; struct htt_pktlog_msg pktlog_msg; struct htt_rx_pn_ind rx_pn_ind; struct htt_rx_offload_ind rx_offload_ind; struct htt_rx_in_ord_ind rx_in_ord_ind; struct htt_tx_fetch_ind tx_fetch_ind; struct htt_tx_fetch_confirm tx_fetch_confirm; struct htt_tx_mode_switch_ind tx_mode_switch_ind; struct htt_channel_change chan_change; struct htt_peer_tx_stats peer_tx_stats; } __packed; } __packed; /*** host side structures follow ***/ struct htt_tx_done { u16 msdu_id; u16 status; u8 ack_rssi; }; enum htt_tx_compl_state { HTT_TX_COMPL_STATE_NONE, HTT_TX_COMPL_STATE_ACK, HTT_TX_COMPL_STATE_NOACK, HTT_TX_COMPL_STATE_DISCARD, }; struct htt_peer_map_event { u8 vdev_id; u16 peer_id; u8 addr[ETH_ALEN]; }; struct htt_peer_unmap_event { u16 peer_id; }; struct ath10k_htt_txbuf_32 { struct htt_data_tx_desc_frag frags[2]; struct ath10k_htc_hdr htc_hdr; struct htt_cmd_hdr cmd_hdr; struct htt_data_tx_desc cmd_tx; } __packed __aligned(4); struct ath10k_htt_txbuf_64 { struct htt_data_tx_desc_frag frags[2]; struct ath10k_htc_hdr htc_hdr; struct htt_cmd_hdr cmd_hdr; struct htt_data_tx_desc_64 cmd_tx; } __packed __aligned(4); struct ath10k_htt { struct ath10k *ar; enum ath10k_htc_ep_id eid; struct sk_buff_head rx_indication_head; u8 target_version_major; u8 target_version_minor; struct completion target_version_received; u8 max_num_amsdu; u8 max_num_ampdu; const enum htt_t2h_msg_type *t2h_msg_types; u32 t2h_msg_types_max; struct { /* * Ring of network buffer objects - This ring is * used exclusively by the host SW. This ring * mirrors the dev_addrs_ring that is shared * between the host SW and the MAC HW. The host SW * uses this netbufs ring to locate the network * buffer objects whose data buffers the HW has * filled. */ struct sk_buff **netbufs_ring; /* This is used only with firmware supporting IN_ORD_IND. * * With Full Rx Reorder the HTT Rx Ring is more of a temporary * buffer ring from which buffer addresses are copied by the * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND * pointing to specific (re-ordered) buffers. * * FIXME: With kernel generic hashing functions there's a lot * of hash collisions for sk_buffs. */ bool in_ord_rx; DECLARE_HASHTABLE(skb_table, 4); /* * Ring of buffer addresses - * This ring holds the "physical" device address of the * rx buffers the host SW provides for the MAC HW to * fill. */ union { __le64 *paddrs_ring_64; __le32 *paddrs_ring_32; }; /* * Base address of ring, as a "physical" device address * rather than a CPU address. 
*/ dma_addr_t base_paddr; /* how many elems in the ring (power of 2) */ int size; /* size - 1 */ unsigned int size_mask; /* how many rx buffers to keep in the ring */ int fill_level; /* how many rx buffers (full+empty) are in the ring */ int fill_cnt; /* * alloc_idx - where HTT SW has deposited empty buffers * This is allocated in consistent mem, so that the FW can * read this variable, and program the HW's FW_IDX reg with * the value of this shadow register. */ struct { __le32 *vaddr; dma_addr_t paddr; } alloc_idx; /* where HTT SW has processed bufs filled by rx MAC DMA */ struct { unsigned int msdu_payld; } sw_rd_idx; /* * refill_retry_timer - timer triggered when the ring is * not refilled to the level expected */ struct timer_list refill_retry_timer; /* Protects access to all rx ring buffer state variables */ spinlock_t lock; } rx_ring; unsigned int prefetch_len; /* Protects access to pending_tx, num_pending_tx */ spinlock_t tx_lock; int max_num_pending_tx; int num_pending_tx; int num_pending_mgmt_tx; struct idr pending_tx; wait_queue_head_t empty_tx_wq; /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */ DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done); /* set if host-fw communication goes haywire * used to avoid further failures */ bool rx_confused; atomic_t num_mpdus_ready; /* This is used to group tx/rx completions separately and process them * in batches to reduce cache stalls */ struct sk_buff_head rx_msdus_q; struct sk_buff_head rx_in_ord_compl_q; struct sk_buff_head tx_fetch_ind_q; /* rx_status template */ struct ieee80211_rx_status rx_status; struct { dma_addr_t paddr; union { struct htt_msdu_ext_desc *vaddr_desc_32; struct htt_msdu_ext_desc_64 *vaddr_desc_64; }; size_t size; } frag_desc; struct { dma_addr_t paddr; union { struct ath10k_htt_txbuf_32 *vaddr_txbuff_32; struct ath10k_htt_txbuf_64 *vaddr_txbuff_64; }; size_t size; } txbuf; struct { bool enabled; struct htt_q_state *vaddr; dma_addr_t paddr; u16 num_push_allowed; u16 num_peers; u16 num_tids; enum htt_tx_mode_switch_mode mode; enum htt_q_depth_type type; } tx_q_state; bool tx_mem_allocated; const struct ath10k_htt_tx_ops *tx_ops; const struct ath10k_htt_rx_ops *rx_ops; bool disable_tx_comp; bool bundle_tx; struct sk_buff_head tx_req_head; struct sk_buff_head tx_complete_head; }; struct ath10k_htt_tx_ops { int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt); int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt); int (*htt_alloc_frag_desc)(struct ath10k_htt *htt); void (*htt_free_frag_desc)(struct ath10k_htt *htt); int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu); int (*htt_alloc_txbuff)(struct ath10k_htt *htt); void (*htt_free_txbuff)(struct ath10k_htt *htt); int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu); void (*htt_flush_tx)(struct ath10k_htt *htt); }; static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) { if (!htt->tx_ops->htt_send_rx_ring_cfg) return -EOPNOTSUPP; return htt->tx_ops->htt_send_rx_ring_cfg(htt); } static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) { if (!htt->tx_ops->htt_send_frag_desc_bank_cfg) return -EOPNOTSUPP; return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt); } static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt) { if (!htt->tx_ops->htt_alloc_frag_desc) return -EOPNOTSUPP; return htt->tx_ops->htt_alloc_frag_desc(htt); } static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt) { if 
(htt->tx_ops->htt_free_frag_desc) htt->tx_ops->htt_free_frag_desc(htt); } static inline int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu) { return htt->tx_ops->htt_tx(htt, txmode, msdu); } static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt) { if (htt->tx_ops->htt_flush_tx) htt->tx_ops->htt_flush_tx(htt); } static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt) { if (!htt->tx_ops->htt_alloc_txbuff) return -EOPNOTSUPP; return htt->tx_ops->htt_alloc_txbuff(htt); } static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt) { if (htt->tx_ops->htt_free_txbuff) htt->tx_ops->htt_free_txbuff(htt); } static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu) { if (!htt->tx_ops->htt_h2t_aggr_cfg_msg) return -EOPNOTSUPP; return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt, max_subfrms_ampdu, max_subfrms_amsdu); } struct ath10k_htt_rx_ops { size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt); void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr); void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr, int idx); void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt); void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx); bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt, struct htt_rx_fragment_indication *rx, struct sk_buff *skb); }; static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt) { if (!htt->rx_ops->htt_get_rx_ring_size) return 0; return htt->rx_ops->htt_get_rx_ring_size(htt); } static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt, void *vaddr) { if (htt->rx_ops->htt_config_paddrs_ring) htt->rx_ops->htt_config_paddrs_ring(htt, vaddr); } static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt, dma_addr_t paddr, int idx) { if (htt->rx_ops->htt_set_paddrs_ring) htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx); } static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt) { if (!htt->rx_ops->htt_get_vaddr_ring) return NULL; return htt->rx_ops->htt_get_vaddr_ring(htt); } static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx) { if (htt->rx_ops->htt_reset_paddrs_ring) htt->rx_ops->htt_reset_paddrs_ring(htt, idx); } static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt, struct htt_rx_fragment_indication *rx, struct sk_buff *skb) { if (!htt->rx_ops->htt_rx_proc_rx_frag_ind) return true; return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb); } /* the driver strongly assumes that the rx header status be 64 bytes long, * so all possible rx_desc structures must respect this assumption. */ #define RX_HTT_HDR_STATUS_LEN 64 /* The rx descriptor structure layout is programmed via rx ring setup * so that FW knows how to transfer the rx descriptor to the host. * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly * when modifying the structure layout of the rx descriptor beyond what it expects * (even if it correctly programmed during the rx ring setup). * Therefore we must keep two different memory layouts, abstract the rx descriptor * representation and use ath10k_rx_desc_ops * for correctly accessing rx descriptor data. 
*/ /* base struct used for abstracting the rx descriptor representation */ struct htt_rx_desc { union { /* This field is filled on the host using the msdu buffer * from htt_rx_indication */ struct fw_rx_desc_base fw_desc; u32 pad; } __packed; } __packed; /* rx descriptor for wcn3990 and possibly extensible for newer cards * Buffers like this are placed on the rx ring. */ struct htt_rx_desc_v2 { struct htt_rx_desc base; struct { struct rx_attention attention; struct rx_frag_info frag_info; struct rx_mpdu_start mpdu_start; struct rx_msdu_start msdu_start; struct rx_msdu_end msdu_end; struct rx_mpdu_end mpdu_end; struct rx_ppdu_start ppdu_start; struct rx_ppdu_end ppdu_end; } __packed; u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; u8 msdu_payload[]; }; /* QCA6174, QCA988x, QCA99x0 dedicated rx descriptor to make sure their firmware * works correctly. We keep a single rx descriptor for all these three * families of cards because from tests it seems to be the most stable solution, * e.g. having a rx descriptor only for QCA6174 seldom caused firmware crashes * during some tests. * Buffers like this are placed on the rx ring. */ struct htt_rx_desc_v1 { struct htt_rx_desc base; struct { struct rx_attention attention; struct rx_frag_info_v1 frag_info; struct rx_mpdu_start mpdu_start; struct rx_msdu_start_v1 msdu_start; struct rx_msdu_end_v1 msdu_end; struct rx_mpdu_end mpdu_end; struct rx_ppdu_start ppdu_start; struct rx_ppdu_end_v1 ppdu_end; } __packed; u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; u8 msdu_payload[]; }; /* rx_desc abstraction */ struct ath10k_htt_rx_desc_ops { /* These fields are mandatory, they must be specified in any instance */ /* sizeof() of the rx_desc structure used by this hw */ size_t rx_desc_size; /* offset of msdu_payload inside the rx_desc structure used by this hw */ size_t rx_desc_msdu_payload_offset; /* These fields are options. 
* When a field is not provided the default implementation gets used * (see the ath10k_rx_desc_* operations below for more info about the defaults) */ bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd); int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd); /* Safely cast from a void* buffer containing an rx descriptor * to the proper rx_desc structure */ struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff); void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs); struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd); struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd); struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd); struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd); struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd); struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd); struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd); struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd); u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd); u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd); }; extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops; extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops; extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops; static inline int ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes) return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd); return 0; } static inline bool ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error) return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd); return false; } /* The default implementation of all these getters is using the old rx_desc, * so that it is easier to define the ath10k_htt_rx_desc_ops instances. * But probably, if new wireless cards must be supported, it would be better * to switch the default implementation to the new rx_desc, since this would * make the extension easier . 
*/ static inline struct htt_rx_desc * ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff) { if (hw->rx_desc_ops->rx_desc_from_raw_buffer) return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff); return &((struct htt_rx_desc_v1 *)buff)->base; } static inline void ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw, struct htt_rx_ring_rx_desc_offsets *off) { if (hw->rx_desc_ops->rx_desc_get_offsets) { hw->rx_desc_ops->rx_desc_get_offsets(off); } else { #define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4) off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); off->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); #undef desc_offset } } static inline struct rx_attention * ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_attention) return hw->rx_desc_ops->rx_desc_get_attention(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return &rx_desc->attention; } static inline struct rx_frag_info_common * ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_frag_info) return hw->rx_desc_ops->rx_desc_get_frag_info(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return &rx_desc->frag_info.common; } static inline struct rx_mpdu_start * ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_mpdu_start) return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return &rx_desc->mpdu_start; } static inline struct rx_mpdu_end * ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_mpdu_end) return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return &rx_desc->mpdu_end; } static inline struct rx_msdu_start_common * ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_msdu_start) return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return &rx_desc->msdu_start.common; } static inline struct rx_msdu_end_common * ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_msdu_end) return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return &rx_desc->msdu_end.common; } static inline struct rx_ppdu_start * ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_ppdu_start) return 
hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return &rx_desc->ppdu_start; } static inline struct rx_ppdu_end_common * ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_ppdu_end) return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return &rx_desc->ppdu_end.common; } static inline u8 * ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status) return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return rx_desc->rx_hdr_status; } static inline u8 * ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd) { struct htt_rx_desc_v1 *rx_desc; if (hw->rx_desc_ops->rx_desc_get_msdu_payload) return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd); rx_desc = container_of(rxd, struct htt_rx_desc_v1, base); return rx_desc->msdu_payload; } #define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff #define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0 #define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000 #define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12 #define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000 #define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13 #define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000 #define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16 #define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000 #define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17 struct htt_rx_desc_base_hl { __le32 info; /* HTT_RX_DESC_HL_INFO_ */ }; struct htt_rx_chan_info { __le16 primary_chan_center_freq_mhz; __le16 contig_chan1_center_freq_mhz; __le16 contig_chan2_center_freq_mhz; u8 phy_mode; u8 reserved; } __packed; #define HTT_RX_DESC_ALIGN 8 #define HTT_MAC_ADDR_LEN 6 /* * FIX THIS * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size, * rounded up to a cache line size. */ #define HTT_RX_BUF_SIZE 2048 /* The HTT_RX_MSDU_SIZE can't be statically computed anymore, * because it depends on the underlying device rx_desc representation */ static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw) { return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size; } /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle * aggregated traffic more nicely. */ #define ATH10K_HTT_MAX_NUM_REFILL 100 /* * DMA_MAP expects the buffer to be an integral number of cache lines. * Rather than checking the actual cache line size, this code makes a * conservative estimate of what the cache line size could be. */ #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) /* These values are default in most firmware revisions and apparently are a * sweet spot performance wise. 
*/ #define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3 #define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64 int ath10k_htt_connect(struct ath10k_htt *htt); int ath10k_htt_init(struct ath10k *ar); int ath10k_htt_setup(struct ath10k_htt *htt); int ath10k_htt_tx_start(struct ath10k_htt *htt); void ath10k_htt_tx_stop(struct ath10k_htt *htt); void ath10k_htt_tx_destroy(struct ath10k_htt *htt); void ath10k_htt_tx_free(struct ath10k_htt *htt); int ath10k_htt_rx_alloc(struct ath10k_htt *htt); int ath10k_htt_rx_ring_refill(struct ath10k *ar); void ath10k_htt_rx_free(struct ath10k_htt *htt); void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask, u64 cookie); void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_tx_fetch_resp(struct ath10k *ar, __le32 token, __le16 fetch_seq_num, struct htt_tx_fetch_record *records, size_t num_records); void ath10k_htt_op_ep_tx_credits(struct ath10k *ar); void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, struct ieee80211_txq *txq); void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, struct ieee80211_txq *txq); void ath10k_htt_tx_txq_sync(struct ath10k *ar); void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt); void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt); int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, bool is_presp); int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu); void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget); int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget); void ath10k_htt_set_tx_ops(struct ath10k_htt *htt); void ath10k_htt_set_rx_ops(struct ath10k_htt *htt); #endif |
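The struct htt_q_state kernel-doc above gives the byte-depth encoding of each count[] entry as FACTOR * 128 * 8^EXP. The helper below is a minimal decode sketch, not part of the driver: the function name is made up for illustration and it only relies on the HTT_TX_Q_STATE_ENTRY_* constants defined in this header, assuming the queue depth type was configured as HTT_Q_DEPTH_TYPE_BYTES.

/* Illustrative only: decode one htt_q_state count[] byte (byte-depth mode). */
static inline u64 example_htt_q_state_depth_bytes(u8 count)
{
	u64 factor = (count & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
		     HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
	u64 exponent = (count & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
		       HTT_TX_Q_STATE_ENTRY_EXP_LSB;

	/* FACTOR * 128 * 8^EXP, with 8^EXP computed as 1 << (3 * EXP) */
	return (factor * HTT_TX_Q_STATE_ENTRY_COEFFICIENT) << (3 * exponent);
}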
| 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MATH_H #define _LINUX_MATH_H #include <linux/types.h> #include <asm/div64.h> #include <uapi/linux/kernel.h> /* * This looks more complex than it should be. But we need to * get the type for the ~ right in round_down (it needs to be * as wide as the result!), and we want to evaluate the macro * arguments just once each. */ #define __round_mask(x, y) ((__typeof__(x))((y)-1)) /** * round_up - round up to next specified power of 2 * @x: the value to round * @y: multiple to round up to (must be a power of 2) * * Rounds @x up to next multiple of @y (which must be a power of 2). * To perform arbitrary rounding up, use roundup() below. */ #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) /** * round_down - round down to next specified power of 2 * @x: the value to round * @y: multiple to round down to (must be a power of 2) * * Rounds @x down to next multiple of @y (which must be a power of 2). * To perform arbitrary rounding down, use rounddown() below. */ #define round_down(x, y) ((x) & ~__round_mask(x, y)) /** * DIV_ROUND_UP_POW2 - divide and round up * @n: numerator * @d: denominator (must be a power of 2) * * Divides @n by @d and rounds up to next multiple of @d (which must be a power * of 2). Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP(). * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP(). */ #define DIV_ROUND_UP_POW2(n, d) \ ((n) / (d) + !!((n) & ((d) - 1))) #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_DOWN_ULL(ll, d) \ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) #define DIV_ROUND_UP_ULL(ll, d) \ DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d)) #if BITS_PER_LONG == 32 # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) #else # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) #endif /** * roundup - round up to the next specified multiple * @x: the value to up * @y: multiple to round up to * * Rounds @x up to next multiple of @y. If @y will always be a power * of 2, consider using the faster round_up(). */ #define roundup(x, y) ( \ { \ typeof(y) __y = y; \ (((x) + (__y - 1)) / __y) * __y; \ } \ ) /** * rounddown - round down to next specified multiple * @x: the value to round * @y: multiple to round down to * * Rounds @x down to next multiple of @y. If @y will always be a power * of 2, consider using the faster round_down(). */ #define rounddown(x, y) ( \ { \ typeof(x) __x = (x); \ __x - (__x % (y)); \ } \ ) /* * Divide positive or negative dividend by positive or negative divisor * and round to closest integer. Result is undefined for negative * divisors if the dividend variable type is unsigned and for negative * dividends if the divisor variable type is unsigned. 
*/ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ typeof(x) __x = x; \ typeof(divisor) __d = divisor; \ (((typeof(x))-1) > 0 || \ ((typeof(divisor))-1) > 0 || \ (((__x) > 0) == ((__d) > 0))) ? \ (((__x) + ((__d) / 2)) / (__d)) : \ (((__x) - ((__d) / 2)) / (__d)); \ } \ ) /* * Same as above but for u64 dividends. divisor must be a 32-bit * number. */ #define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ { \ typeof(divisor) __d = divisor; \ unsigned long long _tmp = (x) + (__d) / 2; \ do_div(_tmp, __d); \ _tmp; \ } \ ) #define __STRUCT_FRACT(type) \ struct type##_fract { \ __##type numerator; \ __##type denominator; \ }; __STRUCT_FRACT(s8) __STRUCT_FRACT(u8) __STRUCT_FRACT(s16) __STRUCT_FRACT(u16) __STRUCT_FRACT(s32) __STRUCT_FRACT(u32) #undef __STRUCT_FRACT /* Calculate "x * n / d" without unnecessary overflow or loss of precision. */ #define mult_frac(x, n, d) \ ({ \ typeof(x) x_ = (x); \ typeof(n) n_ = (n); \ typeof(d) d_ = (d); \ \ typeof(x_) q = x_ / d_; \ typeof(x_) r = x_ % d_; \ q * n_ + r * n_ / d_; \ }) #define sector_div(a, b) do_div(a, b) /** * abs - return absolute value of an argument * @x: the value. If it is unsigned type, it is converted to signed type first. * char is treated as if it was signed (regardless of whether it really is) * but the macro's return type is preserved as char. * * Return: an absolute value of x. */ #define abs(x) __abs_choose_expr(x, long long, \ __abs_choose_expr(x, long, \ __abs_choose_expr(x, int, \ __abs_choose_expr(x, short, \ __abs_choose_expr(x, char, \ __builtin_choose_expr( \ __builtin_types_compatible_p(typeof(x), char), \ (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ ((void)0))))))) #define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ __builtin_types_compatible_p(typeof(x), signed type) || \ __builtin_types_compatible_p(typeof(x), unsigned type), \ ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) /** * abs_diff - return absolute value of the difference between the arguments * @a: the first argument * @b: the second argument * * @a and @b have to be of the same type. With this restriction we compare * signed to signed and unsigned to unsigned. The result is the subtraction * the smaller of the two from the bigger, hence result is always a positive * value. * * Return: an absolute value of the difference between the @a and @b. */ #define abs_diff(a, b) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ (void)(&__a == &__b); \ __a > __b ? (__a - __b) : (__b - __a); \ }) /** * reciprocal_scale - "scale" a value into range [0, ep_ro) * @val: value * @ep_ro: right open interval endpoint * * Perform a "reciprocal multiplication" in order to "scale" a value into * range [0, @ep_ro), where the upper interval endpoint is right-open. * This is useful, e.g. for accessing a index of an array containing * @ep_ro elements, for example. Think of it as sort of modulus, only that * the result isn't that of modulo. ;) Note that if initial input is a * small value, then result will return 0. * * Return: a result based on @val in interval [0, @ep_ro). */ static inline u32 reciprocal_scale(u32 val, u32 ep_ro) { return (u32)(((u64) val * ep_ro) >> 32); } u64 int_pow(u64 base, unsigned int exp); unsigned long int_sqrt(unsigned long); #if BITS_PER_LONG < 64 u32 int_sqrt64(u64 x); #else static inline u32 int_sqrt64(u64 x) { return (u32)int_sqrt(x); } #endif #endif /* _LINUX_MATH_H */ |
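A minimal usage sketch of the rounding helpers defined above (the function is illustrative only, not part of the header): round_up()/round_down() require a power-of-2 multiple and reduce to mask operations, roundup()/rounddown() accept any multiple at the cost of a division, and DIV_ROUND_CLOSEST() rounds a half away from zero for signed operands. The expected results are noted in the comments.

static inline void example_rounding_usage(void)
{
	unsigned int a = round_up(1000, 256);	/* 1024: power-of-2 mask path */
	unsigned int b = round_down(1000, 256);	/* 768 */
	unsigned int c = roundup(1000, 300);	/* 1200: arbitrary multiple */
	int d = DIV_ROUND_CLOSEST(7, 2);	/* 4 */
	int e = DIV_ROUND_CLOSEST(-7, 2);	/* -4: half rounds away from zero */

	(void)a; (void)b; (void)c; (void)d; (void)e;
}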
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * x86-optimized SHA-512 block function
 *
 * Copyright 2025 Google LLC
 */

#include <asm/fpu/api.h>
#include <linux/static_call.h>

DEFINE_STATIC_CALL(sha512_blocks_x86, sha512_blocks_generic);

#define DEFINE_X86_SHA512_FN(c_fn, asm_fn)				\
	asmlinkage void asm_fn(struct sha512_block_state *state,	\
			       const u8 *data, size_t nblocks);		\
	static void c_fn(struct sha512_block_state *state, const u8 *data, \
			 size_t nblocks)				\
	{								\
		if (likely(irq_fpu_usable())) {				\
			kernel_fpu_begin();				\
			asm_fn(state, data, nblocks);			\
			kernel_fpu_end();				\
		} else {						\
			sha512_blocks_generic(state, data, nblocks);	\
		}							\
	}

DEFINE_X86_SHA512_FN(sha512_blocks_ssse3, sha512_transform_ssse3);
DEFINE_X86_SHA512_FN(sha512_blocks_avx, sha512_transform_avx);
DEFINE_X86_SHA512_FN(sha512_blocks_avx2, sha512_transform_rorx);

static void sha512_blocks(struct sha512_block_state *state,
			  const u8 *data, size_t nblocks)
{
	static_call(sha512_blocks_x86)(state, data, nblocks);
}

#define sha512_mod_init_arch sha512_mod_init_arch
static void sha512_mod_init_arch(void)
{
	if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL) &&
	    boot_cpu_has(X86_FEATURE_AVX)) {
		if (boot_cpu_has(X86_FEATURE_AVX2) &&
		    boot_cpu_has(X86_FEATURE_BMI2))
			static_call_update(sha512_blocks_x86,
					   sha512_blocks_avx2);
		else
			static_call_update(sha512_blocks_x86,
					   sha512_blocks_avx);
	} else if (boot_cpu_has(X86_FEATURE_SSSE3)) {
		static_call_update(sha512_blocks_x86, sha512_blocks_ssse3);
	}
}
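The glue code above selects one SHA-512 block implementation at init time and patches it in with a static call, so steady-state dispatch is a direct call rather than an indirect function pointer. A stripped-down sketch of that selection pattern is shown below with entirely hypothetical names (my_op_*); it assumes <linux/static_call.h> and the x86 cpufeature helpers already used above.

/* Hypothetical illustration of the init-time static_call selection pattern. */
static void my_op_generic(int x)
{
	(void)x;	/* portable fallback */
}

static void my_op_fast(int x)
{
	(void)x;	/* CPU-feature-gated fast path */
}

DEFINE_STATIC_CALL(my_op, my_op_generic);

static void my_op_init(void)
{
	/* Retarget the call once, after CPU feature detection. */
	if (boot_cpu_has(X86_FEATURE_AVX2))
		static_call_update(my_op, my_op_fast);
}

static void do_my_op(int x)
{
	static_call(my_op)(x);	/* direct call to the selected backend */
}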
//
SPDX-License-Identifier: GPL-2.0 /* * kernel userspace event delivery * * Copyright (C) 2004 Red Hat, Inc. All rights reserved. * Copyright (C) 2004 Novell, Inc. All rights reserved. * Copyright (C) 2004 IBM, Inc. All rights reserved. * * Authors: * Robert Love <rml@novell.com> * Kay Sievers <kay.sievers@vrfy.org> * Arjan van de Ven <arjanv@redhat.com> * Greg Kroah-Hartman <greg@kroah.com> */ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/kobject.h> #include <linux/export.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/uidgid.h> #include <linux/uuid.h> #include <linux/ctype.h> #include <net/sock.h> #include <net/netlink.h> #include <net/net_namespace.h> atomic64_t uevent_seqnum; #ifdef CONFIG_UEVENT_HELPER char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; #endif struct uevent_sock { struct list_head list; struct sock *sk; }; #ifdef CONFIG_NET static LIST_HEAD(uevent_sock_list); /* This lock protects uevent_sock_list */ static DEFINE_MUTEX(uevent_sock_mutex); #endif /* the strings here must match the enum in include/linux/kobject.h */ static const char *kobject_actions[] = { [KOBJ_ADD] = "add", [KOBJ_REMOVE] = "remove", [KOBJ_CHANGE] = "change", [KOBJ_MOVE] = "move", [KOBJ_ONLINE] = "online", [KOBJ_OFFLINE] = "offline", [KOBJ_BIND] = "bind", [KOBJ_UNBIND] = "unbind", }; static int kobject_action_type(const char *buf, size_t count, enum kobject_action *type, const char **args) { enum kobject_action action; size_t count_first; const char *args_start; int ret = -EINVAL; if (count && (buf[count-1] == '\n' || buf[count-1] == '\0')) count--; if (!count) goto out; args_start = strnchr(buf, count, ' '); if (args_start) { count_first = args_start - buf; args_start = args_start + 1; } else count_first = count; for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) { if (strncmp(kobject_actions[action], buf, count_first) != 0) continue; if (kobject_actions[action][count_first] != '\0') continue; if (args) *args = args_start; *type = action; ret = 0; break; } out: return ret; } static const char *action_arg_word_end(const char *buf, const char *buf_end, char delim) { const char *next = buf; while (next <= buf_end && *next != delim) if (!isalnum(*next++)) return NULL; if (next == buf) return NULL; return next; } static int kobject_action_args(const char *buf, size_t count, struct kobj_uevent_env **ret_env) { struct kobj_uevent_env *env = NULL; const char *next, *buf_end, *key; int key_len; int r = -EINVAL; if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0')) count--; if (!count) return -EINVAL; env = kzalloc(sizeof(*env), GFP_KERNEL); if (!env) return -ENOMEM; /* first arg is UUID */ if (count < UUID_STRING_LEN || !uuid_is_valid(buf) || add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf)) goto out; /* * the rest are custom environment variables in KEY=VALUE * format with ' ' delimiter between each KEY=VALUE pair */ next = buf + UUID_STRING_LEN; buf_end = buf + count - 1; while (next <= buf_end) { if (*next != ' ') goto out; /* skip the ' ', key must follow */ key = ++next; if (key > buf_end) goto out; buf = next; next = action_arg_word_end(buf, buf_end, '='); if (!next || next > buf_end || *next != '=') goto out; key_len = next - buf; /* skip the '=', value must follow */ if (++next > buf_end) goto out; buf = next; next = action_arg_word_end(buf, buf_end, ' '); if (!next) goto out; if (add_uevent_var(env, "SYNTH_ARG_%.*s=%.*s", 
key_len, key, (int) (next - buf), buf)) goto out; } r = 0; out: if (r) kfree(env); else *ret_env = env; return r; } /** * kobject_synth_uevent - send synthetic uevent with arguments * * @kobj: struct kobject for which synthetic uevent is to be generated * @buf: buffer containing action type and action args, newline is ignored * @count: length of buffer * * Returns 0 if kobject_synthetic_uevent() is completed with success or the * corresponding error when it fails. */ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) { char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL }; enum kobject_action action; const char *action_args; struct kobj_uevent_env *env; const char *msg = NULL, *devpath; int r; r = kobject_action_type(buf, count, &action, &action_args); if (r) { msg = "unknown uevent action string"; goto out; } if (!action_args) { r = kobject_uevent_env(kobj, action, no_uuid_envp); goto out; } r = kobject_action_args(action_args, count - (action_args - buf), &env); if (r == -EINVAL) { msg = "incorrect uevent action arguments"; goto out; } if (r) goto out; r = kobject_uevent_env(kobj, action, env->envp); kfree(env); out: if (r) { devpath = kobject_get_path(kobj, GFP_KERNEL); pr_warn("synth uevent: %s: %s\n", devpath ?: "unknown device", msg ?: "failed to send uevent"); kfree(devpath); } return r; } #ifdef CONFIG_UEVENT_HELPER static int kobj_usermode_filter(struct kobject *kobj) { const struct kobj_ns_type_operations *ops; ops = kobj_ns_ops(kobj); if (ops) { const void *init_ns, *ns; ns = kobj->ktype->namespace(kobj); init_ns = ops->initial_ns(); return ns != init_ns; } return 0; } static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) { int buffer_size = sizeof(env->buf) - env->buflen; int len; len = strscpy(&env->buf[env->buflen], subsystem, buffer_size); if (len < 0) { pr_warn("%s: insufficient buffer space (%u left) for %s\n", __func__, buffer_size, subsystem); return -ENOMEM; } env->argv[0] = uevent_helper; env->argv[1] = &env->buf[env->buflen]; env->argv[2] = NULL; env->buflen += len + 1; return 0; } static void cleanup_uevent_env(struct subprocess_info *info) { kfree(info->data); } #endif #ifdef CONFIG_NET static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env, const char *action_string, const char *devpath) { struct netlink_skb_parms *parms; struct sk_buff *skb = NULL; char *scratch; size_t len; /* allocate message with maximum possible size */ len = strlen(action_string) + strlen(devpath) + 2; skb = alloc_skb(len + env->buflen, GFP_KERNEL); if (!skb) return NULL; /* add header */ scratch = skb_put(skb, len); sprintf(scratch, "%s@%s", action_string, devpath); skb_put_data(skb, env->buf, env->buflen); parms = &NETLINK_CB(skb); parms->creds.uid = GLOBAL_ROOT_UID; parms->creds.gid = GLOBAL_ROOT_GID; parms->dst_group = 1; parms->portid = 0; return skb; } static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env, const char *action_string, const char *devpath) { struct sk_buff *skb = NULL; struct uevent_sock *ue_sk; int retval = 0; /* send netlink message */ mutex_lock(&uevent_sock_mutex); list_for_each_entry(ue_sk, &uevent_sock_list, list) { struct sock *uevent_sock = ue_sk->sk; if (!netlink_has_listeners(uevent_sock, 1)) continue; if (!skb) { retval = -ENOMEM; skb = alloc_uevent_skb(env, action_string, devpath); if (!skb) continue; } retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1, GFP_KERNEL); /* ENOBUFS should be handled in userspace */ if (retval == -ENOBUFS || retval == -ESRCH) retval = 0; } 
mutex_unlock(&uevent_sock_mutex); consume_skb(skb); return retval; } static int uevent_net_broadcast_tagged(struct sock *usk, struct kobj_uevent_env *env, const char *action_string, const char *devpath) { struct user_namespace *owning_user_ns = sock_net(usk)->user_ns; struct sk_buff *skb = NULL; int ret = 0; skb = alloc_uevent_skb(env, action_string, devpath); if (!skb) return -ENOMEM; /* fix credentials */ if (owning_user_ns != &init_user_ns) { struct netlink_skb_parms *parms = &NETLINK_CB(skb); kuid_t root_uid; kgid_t root_gid; /* fix uid */ root_uid = make_kuid(owning_user_ns, 0); if (uid_valid(root_uid)) parms->creds.uid = root_uid; /* fix gid */ root_gid = make_kgid(owning_user_ns, 0); if (gid_valid(root_gid)) parms->creds.gid = root_gid; } ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL); /* ENOBUFS should be handled in userspace */ if (ret == -ENOBUFS || ret == -ESRCH) ret = 0; return ret; } #endif static int kobject_uevent_net_broadcast(struct kobject *kobj, struct kobj_uevent_env *env, const char *action_string, const char *devpath) { int ret = 0; #ifdef CONFIG_NET const struct kobj_ns_type_operations *ops; const struct net *net = NULL; ops = kobj_ns_ops(kobj); if (!ops && kobj->kset) { struct kobject *ksobj = &kobj->kset->kobj; if (ksobj->parent != NULL) ops = kobj_ns_ops(ksobj->parent); } /* kobjects currently only carry network namespace tags and they * are the only tag relevant here since we want to decide which * network namespaces to broadcast the uevent into. */ if (ops && ops->netlink_ns && kobj->ktype->namespace) if (ops->type == KOBJ_NS_TYPE_NET) net = kobj->ktype->namespace(kobj); if (!net) ret = uevent_net_broadcast_untagged(env, action_string, devpath); else ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env, action_string, devpath); #endif return ret; } static void zap_modalias_env(struct kobj_uevent_env *env) { static const char modalias_prefix[] = "MODALIAS="; size_t len; int i, j; for (i = 0; i < env->envp_idx;) { if (strncmp(env->envp[i], modalias_prefix, sizeof(modalias_prefix) - 1)) { i++; continue; } len = strlen(env->envp[i]) + 1; if (i != env->envp_idx - 1) { /* @env->envp[] contains pointers to @env->buf[] * with @env->buflen chars, and we are removing * variable MODALIAS here pointed by @env->envp[i] * with length @len as shown below: * * 0 @env->buf[] @env->buflen * --------------------------------------------- * ^ ^ ^ ^ * | |-> @len <-| target block | * @env->envp[0] @env->envp[i] @env->envp[i + 1] * * so the "target block" indicated above is moved * backward by @len, and its right size is * @env->buflen - (@env->envp[i + 1] - @env->envp[0]). */ memmove(env->envp[i], env->envp[i + 1], env->buflen - (env->envp[i + 1] - env->envp[0])); for (j = i; j < env->envp_idx - 1; j++) env->envp[j] = env->envp[j + 1] - len; } env->envp_idx--; env->buflen -= len; } } /** * kobject_uevent_env - send an uevent with environmental data * * @kobj: struct kobject that the action is happening to * @action: action that is happening * @envp_ext: pointer to environmental data * * Returns 0 if kobject_uevent_env() is completed with success or the * corresponding error when it fails. 
*/ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp_ext[]) { struct kobj_uevent_env *env; const char *action_string = kobject_actions[action]; const char *devpath = NULL; const char *subsystem; struct kobject *top_kobj; struct kset *kset; const struct kset_uevent_ops *uevent_ops; int i = 0; int retval = 0; /* * Mark "remove" event done regardless of result, for some subsystems * do not want to re-trigger "remove" event via automatic cleanup. */ if (action == KOBJ_REMOVE) kobj->state_remove_uevent_sent = 1; pr_debug("kobject: '%s' (%p): %s\n", kobject_name(kobj), kobj, __func__); /* search the kset we belong to */ top_kobj = kobj; while (!top_kobj->kset && top_kobj->parent) top_kobj = top_kobj->parent; if (!top_kobj->kset) { pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " "without kset!\n", kobject_name(kobj), kobj, __func__); return -EINVAL; } kset = top_kobj->kset; uevent_ops = kset->uevent_ops; /* skip the event, if uevent_suppress is set*/ if (kobj->uevent_suppress) { pr_debug("kobject: '%s' (%p): %s: uevent_suppress " "caused the event to drop!\n", kobject_name(kobj), kobj, __func__); return 0; } /* skip the event, if the filter returns zero. */ if (uevent_ops && uevent_ops->filter) if (!uevent_ops->filter(kobj)) { pr_debug("kobject: '%s' (%p): %s: filter function " "caused the event to drop!\n", kobject_name(kobj), kobj, __func__); return 0; } /* originating subsystem */ if (uevent_ops && uevent_ops->name) subsystem = uevent_ops->name(kobj); else subsystem = kobject_name(&kset->kobj); if (!subsystem) { pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " "event to drop!\n", kobject_name(kobj), kobj, __func__); return 0; } /* environment buffer */ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); if (!env) return -ENOMEM; /* complete object path */ devpath = kobject_get_path(kobj, GFP_KERNEL); if (!devpath) { retval = -ENOENT; goto exit; } /* default keys */ retval = add_uevent_var(env, "ACTION=%s", action_string); if (retval) goto exit; retval = add_uevent_var(env, "DEVPATH=%s", devpath); if (retval) goto exit; retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem); if (retval) goto exit; /* keys passed in from the caller */ if (envp_ext) { for (i = 0; envp_ext[i]; i++) { retval = add_uevent_var(env, "%s", envp_ext[i]); if (retval) goto exit; } } /* let the kset specific function add its stuff */ if (uevent_ops && uevent_ops->uevent) { retval = uevent_ops->uevent(kobj, env); if (retval) { pr_debug("kobject: '%s' (%p): %s: uevent() returned " "%d\n", kobject_name(kobj), kobj, __func__, retval); goto exit; } } switch (action) { case KOBJ_ADD: /* * Mark "add" event so we can make sure we deliver "remove" * event to userspace during automatic cleanup. If * the object did send an "add" event, "remove" will * automatically generated by the core, if not already done * by the caller. 
*/ kobj->state_add_uevent_sent = 1; break; case KOBJ_UNBIND: zap_modalias_env(env); break; default: break; } /* we will send an event, so request a new sequence number */ retval = add_uevent_var(env, "SEQNUM=%llu", atomic64_inc_return(&uevent_seqnum)); if (retval) goto exit; retval = kobject_uevent_net_broadcast(kobj, env, action_string, devpath); #ifdef CONFIG_UEVENT_HELPER /* call uevent_helper, usually only enabled during early boot */ if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { struct subprocess_info *info; retval = add_uevent_var(env, "HOME=/"); if (retval) goto exit; retval = add_uevent_var(env, "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); if (retval) goto exit; retval = init_uevent_argv(env, subsystem); if (retval) goto exit; retval = -ENOMEM; info = call_usermodehelper_setup(env->argv[0], env->argv, env->envp, GFP_KERNEL, NULL, cleanup_uevent_env, env); if (info) { retval = call_usermodehelper_exec(info, UMH_NO_WAIT); env = NULL; /* freed by cleanup_uevent_env */ } } #endif exit: kfree(devpath); kfree(env); return retval; } EXPORT_SYMBOL_GPL(kobject_uevent_env); /** * kobject_uevent - notify userspace by sending an uevent * * @kobj: struct kobject that the action is happening to * @action: action that is happening * * Returns 0 if kobject_uevent() is completed with success or the * corresponding error when it fails. */ int kobject_uevent(struct kobject *kobj, enum kobject_action action) { return kobject_uevent_env(kobj, action, NULL); } EXPORT_SYMBOL_GPL(kobject_uevent); /** * add_uevent_var - add key value string to the environment buffer * @env: environment buffer structure * @format: printf format for the key=value pair * * Returns 0 if environment variable was added successfully or -ENOMEM * if no space was available. */ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) 
{ va_list args; int len; if (env->envp_idx >= ARRAY_SIZE(env->envp)) { WARN(1, KERN_ERR "add_uevent_var: too many keys\n"); return -ENOMEM; } va_start(args, format); len = vsnprintf(&env->buf[env->buflen], sizeof(env->buf) - env->buflen, format, args); va_end(args); if (len >= (sizeof(env->buf) - env->buflen)) { WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n"); return -ENOMEM; } env->envp[env->envp_idx++] = &env->buf[env->buflen]; env->buflen += len + 1; return 0; } EXPORT_SYMBOL_GPL(add_uevent_var); #if defined(CONFIG_NET) static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb, struct netlink_ext_ack *extack) { /* u64 to chars: 2^64 - 1 = 21 chars */ char buf[sizeof("SEQNUM=") + 21]; struct sk_buff *skbc; int ret; /* bump and prepare sequence number */ ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", atomic64_inc_return(&uevent_seqnum)); if (ret < 0 || (size_t)ret >= sizeof(buf)) return -ENOMEM; ret++; /* verify message does not overflow */ if ((skb->len + ret) > UEVENT_BUFFER_SIZE) { NL_SET_ERR_MSG(extack, "uevent message too big"); return -EINVAL; } /* copy skb and extend to accommodate sequence number */ skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL); if (!skbc) return -ENOMEM; /* append sequence number */ skb_put_data(skbc, buf, ret); /* remove msg header */ skb_pull(skbc, NLMSG_HDRLEN); /* set portid 0 to inform userspace message comes from kernel */ NETLINK_CB(skbc).portid = 0; NETLINK_CB(skbc).dst_group = 1; ret = netlink_broadcast(usk, skbc, 0, 1, GFP_KERNEL); /* ENOBUFS should be handled in userspace */ if (ret == -ENOBUFS || ret == -ESRCH) ret = 0; return ret; } static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net; int ret; if (!nlmsg_data(nlh)) return -EINVAL; /* * Verify that we are allowed to send messages to the target * network namespace. The caller must have CAP_SYS_ADMIN in the * owning user namespace of the target network namespace. */ net = sock_net(NETLINK_CB(skb).sk); if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) { NL_SET_ERR_MSG(extack, "missing CAP_SYS_ADMIN capability"); return -EPERM; } ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack); return ret; } static void uevent_net_rcv(struct sk_buff *skb) { netlink_rcv_skb(skb, &uevent_net_rcv_skb); } static int uevent_net_init(struct net *net) { struct uevent_sock *ue_sk; struct netlink_kernel_cfg cfg = { .groups = 1, .input = uevent_net_rcv, .flags = NL_CFG_F_NONROOT_RECV }; ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); if (!ue_sk) return -ENOMEM; ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg); if (!ue_sk->sk) { pr_err("kobject_uevent: unable to create netlink socket!\n"); kfree(ue_sk); return -ENODEV; } net->uevent_sock = ue_sk; /* Restrict uevents to initial user namespace. 
*/ if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) { mutex_lock(&uevent_sock_mutex); list_add_tail(&ue_sk->list, &uevent_sock_list); mutex_unlock(&uevent_sock_mutex); } return 0; } static void uevent_net_exit(struct net *net) { struct uevent_sock *ue_sk = net->uevent_sock; if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) { mutex_lock(&uevent_sock_mutex); list_del(&ue_sk->list); mutex_unlock(&uevent_sock_mutex); } netlink_kernel_release(ue_sk->sk); kfree(ue_sk); } static struct pernet_operations uevent_net_ops = { .init = uevent_net_init, .exit = uevent_net_exit, }; static int __init kobject_uevent_init(void) { return register_pernet_subsys(&uevent_net_ops); } postcore_initcall(kobject_uevent_init); #endif #ifdef CONFIG_UEVENT_HELPER static const struct ctl_table uevent_helper_sysctl_table[] = { { .procname = "hotplug", .data = &uevent_helper, .maxlen = UEVENT_HELPER_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, }; static int __init init_uevent_helper_sysctl(void) { register_sysctl_init("kernel", uevent_helper_sysctl_table); return 0; } postcore_initcall(init_uevent_helper_sysctl); #endif |
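kobject_synth_uevent() above accepts an action word, an optional UUID, and optional alphanumeric KEY=VALUE pairs, which it re-exports to listeners as SYNTH_UUID and SYNTH_ARG_<KEY> environment variables. The userspace snippet below is a hedged sketch of triggering such a synthetic uevent by writing to a kobject's sysfs uevent file; the device path is hypothetical and the UUID is only an example value.

/* Illustrative userspace program, not kernel code. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg =
		"change 12345678-1234-1234-1234-123456789abc FOO=1 BAR=baz\n";
	/* hypothetical path: use the uevent file of a real kobject */
	int fd = open("/sys/class/net/eth0/uevent", O_WRONLY);

	if (fd < 0 || write(fd, msg, strlen(msg)) < 0)
		perror("synth uevent");
	if (fd >= 0)
		close(fd);
	return 0;
}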
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
* * Authors: * Rob Clark <robdclark@gmail.com> * Daniel Vetter <daniel.vetter@ffwll.ch> */ #include <linux/export.h> #include <linux/sync_file.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_blend.h> #include <drm/drm_bridge.h> #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_mode.h> #include <drm/drm_print.h> #include <drm/drm_writeback.h> #include "drm_crtc_internal.h" #include "drm_internal.h" void __drm_crtc_commit_free(struct kref *kref) { struct drm_crtc_commit *commit = container_of(kref, struct drm_crtc_commit, ref); kfree(commit); } EXPORT_SYMBOL(__drm_crtc_commit_free); /** * drm_crtc_commit_wait - Waits for a commit to complete * @commit: &drm_crtc_commit to wait for * * Waits for a given &drm_crtc_commit to be programmed into the * hardware and flipped to. * * Returns: * 0 on success, a negative error code otherwise. */ int drm_crtc_commit_wait(struct drm_crtc_commit *commit) { unsigned long timeout = 10 * HZ; int ret; if (!commit) return 0; ret = wait_for_completion_timeout(&commit->hw_done, timeout); if (!ret) { drm_err(commit->crtc->dev, "hw_done timed out\n"); return -ETIMEDOUT; } /* * Currently no support for overwriting flips, hence * stall for previous one to execute completely. */ ret = wait_for_completion_timeout(&commit->flip_done, timeout); if (!ret) { drm_err(commit->crtc->dev, "flip_done timed out\n"); return -ETIMEDOUT; } return 0; } EXPORT_SYMBOL(drm_crtc_commit_wait); /** * drm_atomic_state_default_release - * release memory initialized by drm_atomic_state_init * @state: atomic state * * Free all the memory allocated by drm_atomic_state_init. * This should only be used by drivers which are still subclassing * &drm_atomic_state and haven't switched to &drm_private_state yet. */ void drm_atomic_state_default_release(struct drm_atomic_state *state) { kfree(state->connectors); kfree(state->crtcs); kfree(state->planes); kfree(state->private_objs); } EXPORT_SYMBOL(drm_atomic_state_default_release); /** * drm_atomic_state_init - init new atomic state * @dev: DRM device * @state: atomic state * * Default implementation for filling in a new atomic state. * This should only be used by drivers which are still subclassing * &drm_atomic_state and haven't switched to &drm_private_state yet. */ int drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) { kref_init(&state->ref); /* TODO legacy paths should maybe do a better job about * setting this appropriately? */ state->allow_modeset = true; state->crtcs = kcalloc(dev->mode_config.num_crtc, sizeof(*state->crtcs), GFP_KERNEL); if (!state->crtcs) goto fail; state->planes = kcalloc(dev->mode_config.num_total_plane, sizeof(*state->planes), GFP_KERNEL); if (!state->planes) goto fail; /* * Because drm_atomic_state can be committed asynchronously we need our * own reference and cannot rely on the on implied by drm_file in the * ioctl call. */ drm_dev_get(dev); state->dev = dev; drm_dbg_atomic(dev, "Allocated atomic state %p\n", state); return 0; fail: drm_atomic_state_default_release(state); return -ENOMEM; } EXPORT_SYMBOL(drm_atomic_state_init); /** * drm_atomic_state_alloc - allocate atomic state * @dev: DRM device * * This allocates an empty atomic state to track updates. 
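 *
 * A minimal usage sketch for a driver-internal update; error handling and the
 * EDEADLK backoff dance are omitted, and @dev stands for the &drm_device being
 * updated:
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     struct drm_atomic_state *state;
 *
 *     drm_modeset_acquire_init(&ctx, 0);
 *     state = drm_atomic_state_alloc(dev);
 *     if (!state)
 *             goto out;
 *     state->acquire_ctx = &ctx;
 *
 *     ... add object states and commit the update ...
 *
 *     drm_atomic_state_put(state);
 * out:
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);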
*/ struct drm_atomic_state * drm_atomic_state_alloc(struct drm_device *dev) { struct drm_mode_config *config = &dev->mode_config; if (!config->funcs->atomic_state_alloc) { struct drm_atomic_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; if (drm_atomic_state_init(dev, state) < 0) { kfree(state); return NULL; } return state; } return config->funcs->atomic_state_alloc(dev); } EXPORT_SYMBOL(drm_atomic_state_alloc); /** * drm_atomic_state_default_clear - clear base atomic state * @state: atomic state * * Default implementation for clearing atomic state. * This should only be used by drivers which are still subclassing * &drm_atomic_state and haven't switched to &drm_private_state yet. */ void drm_atomic_state_default_clear(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct drm_mode_config *config = &dev->mode_config; int i; drm_dbg_atomic(dev, "Clearing atomic state %p\n", state); for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i].ptr; if (!connector) continue; connector->funcs->atomic_destroy_state(connector, state->connectors[i].state); state->connectors[i].ptr = NULL; state->connectors[i].state = NULL; state->connectors[i].old_state = NULL; state->connectors[i].new_state = NULL; drm_connector_put(connector); } for (i = 0; i < config->num_crtc; i++) { struct drm_crtc *crtc = state->crtcs[i].ptr; if (!crtc) continue; crtc->funcs->atomic_destroy_state(crtc, state->crtcs[i].state); state->crtcs[i].ptr = NULL; state->crtcs[i].state = NULL; state->crtcs[i].old_state = NULL; state->crtcs[i].new_state = NULL; if (state->crtcs[i].commit) { drm_crtc_commit_put(state->crtcs[i].commit); state->crtcs[i].commit = NULL; } } for (i = 0; i < config->num_total_plane; i++) { struct drm_plane *plane = state->planes[i].ptr; if (!plane) continue; plane->funcs->atomic_destroy_state(plane, state->planes[i].state); state->planes[i].ptr = NULL; state->planes[i].state = NULL; state->planes[i].old_state = NULL; state->planes[i].new_state = NULL; } for (i = 0; i < state->num_private_objs; i++) { struct drm_private_obj *obj = state->private_objs[i].ptr; obj->funcs->atomic_destroy_state(obj, state->private_objs[i].state); state->private_objs[i].ptr = NULL; state->private_objs[i].state = NULL; state->private_objs[i].old_state = NULL; state->private_objs[i].new_state = NULL; } state->num_private_objs = 0; if (state->fake_commit) { drm_crtc_commit_put(state->fake_commit); state->fake_commit = NULL; } } EXPORT_SYMBOL(drm_atomic_state_default_clear); /** * drm_atomic_state_clear - clear state object * @state: atomic state * * When the w/w mutex algorithm detects a deadlock we need to back off and drop * all locks. So someone else could sneak in and change the current modeset * configuration. Which means that all the state assembled in @state is no * longer an atomic update to the current state, but to some arbitrary earlier * state. Which could break assumptions the driver's * &drm_mode_config_funcs.atomic_check likely relies on. * * Hence we must clear all cached state and completely start over, using this * function. 
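 *
 * A sketch of the usual backoff loop, assuming @state was set up with
 * &drm_atomic_state.acquire_ctx pointing at a live acquire context:
 *
 * retry:
 *     ret = drm_atomic_commit(state);
 *     if (ret == -EDEADLK) {
 *             drm_atomic_state_clear(state);
 *             drm_modeset_backoff(state->acquire_ctx);
 *             goto retry;
 *     }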
*/ void drm_atomic_state_clear(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct drm_mode_config *config = &dev->mode_config; if (config->funcs->atomic_state_clear) config->funcs->atomic_state_clear(state); else drm_atomic_state_default_clear(state); } EXPORT_SYMBOL(drm_atomic_state_clear); /** * __drm_atomic_state_free - free all memory for an atomic state * @ref: This atomic state to deallocate * * This frees all memory associated with an atomic state, including all the * per-object state for planes, CRTCs and connectors. */ void __drm_atomic_state_free(struct kref *ref) { struct drm_atomic_state *state = container_of(ref, typeof(*state), ref); struct drm_device *dev = state->dev; struct drm_mode_config *config = &dev->mode_config; drm_atomic_state_clear(state); drm_dbg_atomic(state->dev, "Freeing atomic state %p\n", state); if (config->funcs->atomic_state_free) { config->funcs->atomic_state_free(state); } else { drm_atomic_state_default_release(state); kfree(state); } drm_dev_put(dev); } EXPORT_SYMBOL(__drm_atomic_state_free); /** * drm_atomic_get_crtc_state - get CRTC state * @state: global atomic state object * @crtc: CRTC to get state object for * * This function returns the CRTC state for the given CRTC, allocating it if * needed. It will also grab the relevant CRTC lock to make sure that the state * is consistent. * * WARNING: Drivers may only add new CRTC states to a @state if * drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit * not created by userspace through an IOCTL call. * * Returns: * Either the allocated state or the error code encoded into the pointer. When * the error is EDEADLK then the w/w mutex code has detected a deadlock and the * entire atomic sequence must be restarted. All other errors are fatal. */ struct drm_crtc_state * drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_crtc *crtc) { int ret, index = drm_crtc_index(crtc); struct drm_crtc_state *crtc_state; WARN_ON(!state->acquire_ctx); crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); if (crtc_state) return crtc_state; ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx); if (ret) return ERR_PTR(ret); crtc_state = crtc->funcs->atomic_duplicate_state(crtc); if (!crtc_state) return ERR_PTR(-ENOMEM); state->crtcs[index].state = crtc_state; state->crtcs[index].old_state = crtc->state; state->crtcs[index].new_state = crtc_state; state->crtcs[index].ptr = crtc; crtc_state->state = state; drm_dbg_atomic(state->dev, "Added [CRTC:%d:%s] %p state to %p\n", crtc->base.id, crtc->name, crtc_state, state); return crtc_state; } EXPORT_SYMBOL(drm_atomic_get_crtc_state); static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, const struct drm_crtc_state *new_crtc_state) { struct drm_crtc *crtc = new_crtc_state->crtc; /* NOTE: we explicitly don't enforce constraints such as primary * layer covering entire screen, since that is something we want * to allow (on hw that supports it). For hw that does not, it * should be checked in driver's crtc->atomic_check() vfunc. * * TODO: Add generic modeset state checks once we support those. */ if (new_crtc_state->active && !new_crtc_state->enable) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] active without enabled\n", crtc->base.id, crtc->name); return -EINVAL; } /* The state->enable vs. state->mode_blob checks can be WARN_ON, * as this is a kernel-internal detail that userspace should never * be able to trigger. 
*/ if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] enabled without mode blob\n", crtc->base.id, crtc->name); return -EINVAL; } if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] disabled with mode blob\n", crtc->base.id, crtc->name); return -EINVAL; } /* * Reject event generation for when a CRTC is off and stays off. * It wouldn't be hard to implement this, but userspace has a track * record of happily burning through 100% cpu (or worse, crash) when the * display pipe is suspended. To avoid all that fun just reject updates * that ask for events since likely that indicates a bug in the * compositor's drawing loop. This is consistent with the vblank IOCTL * and legacy page_flip IOCTL which also reject service on a disabled * pipe. */ if (new_crtc_state->event && !new_crtc_state->active && !old_crtc_state->active) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] requesting event but off\n", crtc->base.id, crtc->name); return -EINVAL; } return 0; } static void drm_atomic_crtc_print_state(struct drm_printer *p, const struct drm_crtc_state *state) { struct drm_crtc *crtc = state->crtc; drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name); drm_printf(p, "\tenable=%d\n", state->enable); drm_printf(p, "\tactive=%d\n", state->active); drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active); drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed); drm_printf(p, "\tmode_changed=%d\n", state->mode_changed); drm_printf(p, "\tactive_changed=%d\n", state->active_changed); drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed); drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); drm_printf(p, "\tplane_mask=%x\n", state->plane_mask); drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask); drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask); drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode)); if (crtc->funcs->atomic_print_state) crtc->funcs->atomic_print_state(p, state); } static int drm_atomic_connector_check(struct drm_connector *connector, struct drm_connector_state *state) { struct drm_crtc_state *crtc_state; struct drm_writeback_job *writeback_job = state->writeback_job; const struct drm_display_info *info = &connector->display_info; state->max_bpc = info->bpc ? 
info->bpc : 8; if (connector->max_bpc_property) state->max_bpc = min(state->max_bpc, state->max_requested_bpc); if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job) return 0; if (writeback_job->fb && !state->crtc) { drm_dbg_atomic(connector->dev, "[CONNECTOR:%d:%s] framebuffer without CRTC\n", connector->base.id, connector->name); return -EINVAL; } if (state->crtc) crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); if (writeback_job->fb && !crtc_state->active) { drm_dbg_atomic(connector->dev, "[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n", connector->base.id, connector->name, state->crtc->base.id); return -EINVAL; } if (!writeback_job->fb) { if (writeback_job->out_fence) { drm_dbg_atomic(connector->dev, "[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", connector->base.id, connector->name); return -EINVAL; } drm_writeback_cleanup_job(writeback_job); state->writeback_job = NULL; } return 0; } /** * drm_atomic_get_plane_state - get plane state * @state: global atomic state object * @plane: plane to get state object for * * This function returns the plane state for the given plane, allocating it if * needed. It will also grab the relevant plane lock to make sure that the state * is consistent. * * Returns: * Either the allocated state or the error code encoded into the pointer. When * the error is EDEADLK then the w/w mutex code has detected a deadlock and the * entire atomic sequence must be restarted. All other errors are fatal. */ struct drm_plane_state * drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane *plane) { int ret, index = drm_plane_index(plane); struct drm_plane_state *plane_state; WARN_ON(!state->acquire_ctx); /* the legacy pointers should never be set */ WARN_ON(plane->fb); WARN_ON(plane->old_fb); WARN_ON(plane->crtc); plane_state = drm_atomic_get_existing_plane_state(state, plane); if (plane_state) return plane_state; ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); if (ret) return ERR_PTR(ret); plane_state = plane->funcs->atomic_duplicate_state(plane); if (!plane_state) return ERR_PTR(-ENOMEM); state->planes[index].state = plane_state; state->planes[index].ptr = plane; state->planes[index].old_state = plane->state; state->planes[index].new_state = plane_state; plane_state->state = state; drm_dbg_atomic(plane->dev, "Added [PLANE:%d:%s] %p state to %p\n", plane->base.id, plane->name, plane_state, state); if (plane_state->crtc) { struct drm_crtc_state *crtc_state; crtc_state = drm_atomic_get_crtc_state(state, plane_state->crtc); if (IS_ERR(crtc_state)) return ERR_CAST(crtc_state); } return plane_state; } EXPORT_SYMBOL(drm_atomic_get_plane_state); static bool plane_switching_crtc(const struct drm_plane_state *old_plane_state, const struct drm_plane_state *new_plane_state) { if (!old_plane_state->crtc || !new_plane_state->crtc) return false; if (old_plane_state->crtc == new_plane_state->crtc) return false; /* This could be refined, but currently there's no helper or driver code * to implement direct switching of active planes nor userspace to take * advantage of more direct plane switching without the intermediate * full OFF state. */ return true; } /** * drm_atomic_plane_check - check plane state * @old_plane_state: old plane state to check * @new_plane_state: new plane state to check * * Provides core sanity checks for plane state. 
* * RETURNS: * Zero on success, error code on failure */ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, const struct drm_plane_state *new_plane_state) { struct drm_plane *plane = new_plane_state->plane; struct drm_crtc *crtc = new_plane_state->crtc; const struct drm_framebuffer *fb = new_plane_state->fb; unsigned int fb_width, fb_height; struct drm_mode_rect *clips; uint32_t num_clips; /* either *both* CRTC and FB must be set, or neither */ if (crtc && !fb) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n", plane->base.id, plane->name); return -EINVAL; } else if (fb && !crtc) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n", plane->base.id, plane->name); return -EINVAL; } /* if disabled, we don't care about the rest of the state: */ if (!crtc) return 0; /* Check whether this plane is usable on this CRTC */ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { drm_dbg_atomic(plane->dev, "Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", crtc->base.id, crtc->name, plane->base.id, plane->name); return -EINVAL; } /* Check whether this plane supports the fb pixel format. */ if (!drm_plane_has_format(plane, fb->format->format, fb->modifier)) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", plane->base.id, plane->name, &fb->format->format, fb->modifier); return -EINVAL; } /* Give drivers some help against integer overflows */ if (new_plane_state->crtc_w > INT_MAX || new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w || new_plane_state->crtc_h > INT_MAX || new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", plane->base.id, plane->name, new_plane_state->crtc_w, new_plane_state->crtc_h, new_plane_state->crtc_x, new_plane_state->crtc_y); return -ERANGE; } fb_width = fb->width << 16; fb_height = fb->height << 16; /* Make sure source coordinates are inside the fb. */ if (new_plane_state->src_w > fb_width || new_plane_state->src_x > fb_width - new_plane_state->src_w || new_plane_state->src_h > fb_height || new_plane_state->src_y > fb_height - new_plane_state->src_h) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] invalid source coordinates " "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", plane->base.id, plane->name, new_plane_state->src_w >> 16, ((new_plane_state->src_w & 0xffff) * 15625) >> 10, new_plane_state->src_h >> 16, ((new_plane_state->src_h & 0xffff) * 15625) >> 10, new_plane_state->src_x >> 16, ((new_plane_state->src_x & 0xffff) * 15625) >> 10, new_plane_state->src_y >> 16, ((new_plane_state->src_y & 0xffff) * 15625) >> 10, fb->width, fb->height); return -ENOSPC; } clips = __drm_plane_get_damage_clips(new_plane_state); num_clips = drm_plane_get_damage_clips_count(new_plane_state); /* Make sure damage clips are valid and inside the fb. 
*/ while (num_clips > 0) { if (clips->x1 >= clips->x2 || clips->y1 >= clips->y2 || clips->x1 < 0 || clips->y1 < 0 || clips->x2 > fb_width || clips->y2 > fb_height) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", plane->base.id, plane->name, clips->x1, clips->y1, clips->x2, clips->y2); return -EINVAL; } clips++; num_clips--; } if (plane_switching_crtc(old_plane_state, new_plane_state)) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] switching CRTC directly\n", plane->base.id, plane->name); return -EINVAL; } return 0; } static void drm_atomic_plane_print_state(struct drm_printer *p, const struct drm_plane_state *state) { struct drm_plane *plane = state->plane; struct drm_rect src = drm_plane_state_src(state); struct drm_rect dest = drm_plane_state_dest(state); drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0); if (state->fb) drm_framebuffer_print_info(p, 2, state->fb); drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); drm_printf(p, "\trotation=%x\n", state->rotation); drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos); drm_printf(p, "\tcolor-encoding=%s\n", drm_get_color_encoding_name(state->color_encoding)); drm_printf(p, "\tcolor-range=%s\n", drm_get_color_range_name(state->color_range)); drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); if (plane->funcs->atomic_print_state) plane->funcs->atomic_print_state(p, state); } /** * DOC: handling driver private state * * Very often the DRM objects exposed to userspace in the atomic modeset api * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the * underlying hardware. Especially for any kind of shared resources (e.g. shared * clocks, scaler units, bandwidth and fifo limits shared among a group of * planes or CRTCs, and so on) it makes sense to model these as independent * objects. Drivers then need to do similar state tracking and commit ordering for * such private (since not exposed to userspace) objects as the atomic core and * helpers already provide for connectors, planes and CRTCs. * * To make this easier on drivers the atomic core provides some support to track * driver private state objects using struct &drm_private_obj, with the * associated state struct &drm_private_state. * * Similar to userspace-exposed objects, private state structures can be * acquired by calling drm_atomic_get_private_obj_state(). This also takes care * of locking, hence drivers should not have a need to call drm_modeset_lock() * directly. Sequence of the actual hardware state commit is not handled, * drivers might need to keep track of struct drm_crtc_commit within subclassed * structure of &drm_private_state as necessary, e.g. similar to * &drm_plane_state.commit. See also &drm_atomic_state.fake_commit. * * All private state structures contained in a &drm_atomic_state update can be * iterated using for_each_oldnew_private_obj_in_state(), * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state(). * Drivers are recommended to wrap these for each type of driver private state * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at * least if they want to iterate over all objects of a given type. * * An earlier way to handle driver private state was by subclassing struct * &drm_atomic_state. 
But since that encourages non-standard ways to implement * the check/commit split atomic requires (by using e.g. "check and rollback or * commit instead" of "duplicate state, check, then either commit or release * duplicated state) it is deprecated in favour of using &drm_private_state. */ /** * drm_atomic_private_obj_init - initialize private object * @dev: DRM device this object will be attached to * @obj: private object * @state: initial private object state * @funcs: pointer to the struct of function pointers that identify the object * type * * Initialize the private object, which can be embedded into any * driver private object that needs its own atomic state. */ void drm_atomic_private_obj_init(struct drm_device *dev, struct drm_private_obj *obj, struct drm_private_state *state, const struct drm_private_state_funcs *funcs) { memset(obj, 0, sizeof(*obj)); drm_modeset_lock_init(&obj->lock); obj->state = state; obj->funcs = funcs; list_add_tail(&obj->head, &dev->mode_config.privobj_list); state->obj = obj; } EXPORT_SYMBOL(drm_atomic_private_obj_init); /** * drm_atomic_private_obj_fini - finalize private object * @obj: private object * * Finalize the private object. */ void drm_atomic_private_obj_fini(struct drm_private_obj *obj) { list_del(&obj->head); obj->funcs->atomic_destroy_state(obj, obj->state); drm_modeset_lock_fini(&obj->lock); } EXPORT_SYMBOL(drm_atomic_private_obj_fini); /** * drm_atomic_get_private_obj_state - get private object state * @state: global atomic state * @obj: private object to get the state for * * This function returns the private object state for the given private object, * allocating the state if needed. It will also grab the relevant private * object lock to make sure that the state is consistent. * * RETURNS: * Either the allocated state or the error code encoded into a pointer. */ struct drm_private_state * drm_atomic_get_private_obj_state(struct drm_atomic_state *state, struct drm_private_obj *obj) { int index, num_objs, i, ret; size_t size; struct __drm_private_objs_state *arr; struct drm_private_state *obj_state; for (i = 0; i < state->num_private_objs; i++) if (obj == state->private_objs[i].ptr) return state->private_objs[i].state; ret = drm_modeset_lock(&obj->lock, state->acquire_ctx); if (ret) return ERR_PTR(ret); num_objs = state->num_private_objs + 1; size = sizeof(*state->private_objs) * num_objs; arr = krealloc(state->private_objs, size, GFP_KERNEL); if (!arr) return ERR_PTR(-ENOMEM); state->private_objs = arr; index = state->num_private_objs; memset(&state->private_objs[index], 0, sizeof(*state->private_objs)); obj_state = obj->funcs->atomic_duplicate_state(obj); if (!obj_state) return ERR_PTR(-ENOMEM); state->private_objs[index].state = obj_state; state->private_objs[index].old_state = obj->state; state->private_objs[index].new_state = obj_state; state->private_objs[index].ptr = obj; obj_state->state = state; state->num_private_objs = num_objs; drm_dbg_atomic(state->dev, "Added new private object %p state %p to %p\n", obj, obj_state, state); return obj_state; } EXPORT_SYMBOL(drm_atomic_get_private_obj_state); /** * drm_atomic_get_old_private_obj_state * @state: global atomic state object * @obj: private_obj to grab * * This function returns the old private object state for the given private_obj, * or NULL if the private_obj is not part of the global atomic state. 
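 *
 * A short sketch with hypothetical driver types, where foo_obj embeds a
 * &drm_private_obj as @base and to_foo_state() is the driver's container_of()
 * helper:
 *
 *     struct drm_private_state *old_priv;
 *
 *     old_priv = drm_atomic_get_old_private_obj_state(state, &foo_obj->base);
 *     if (old_priv)
 *             old_foo = to_foo_state(old_priv);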
*/ struct drm_private_state * drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state, struct drm_private_obj *obj) { int i; for (i = 0; i < state->num_private_objs; i++) if (obj == state->private_objs[i].ptr) return state->private_objs[i].old_state; return NULL; } EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state); /** * drm_atomic_get_new_private_obj_state * @state: global atomic state object * @obj: private_obj to grab * * This function returns the new private object state for the given private_obj, * or NULL if the private_obj is not part of the global atomic state. */ struct drm_private_state * drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state, struct drm_private_obj *obj) { int i; for (i = 0; i < state->num_private_objs; i++) if (obj == state->private_objs[i].ptr) return state->private_objs[i].new_state; return NULL; } EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state); /** * drm_atomic_get_old_connector_for_encoder - Get old connector for an encoder * @state: Atomic state * @encoder: The encoder to fetch the connector state for * * This function finds and returns the connector that was connected to @encoder * as specified by the @state. * * If there is no connector in @state which previously had @encoder connected to * it, this function will return NULL. While this may seem like an invalid use * case, it is sometimes useful to differentiate commits which had no prior * connectors attached to @encoder vs ones that did (and to inspect their * state). This is especially true in enable hooks because the pipeline has * changed. * * If you don't have access to the atomic state, see * drm_atomic_get_connector_for_encoder(). * * Returns: The old connector connected to @encoder, or NULL if the encoder is * not connected. */ struct drm_connector * drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_connector_state *conn_state; struct drm_connector *connector; unsigned int i; for_each_old_connector_in_state(state, connector, conn_state, i) { if (conn_state->best_encoder == encoder) return connector; } return NULL; } EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder); /** * drm_atomic_get_new_connector_for_encoder - Get new connector for an encoder * @state: Atomic state * @encoder: The encoder to fetch the connector state for * * This function finds and returns the connector that will be connected to * @encoder as specified by the @state. * * If there is no connector in @state which will have @encoder connected to it, * this function will return NULL. While this may seem like an invalid use case, * it is sometimes useful to differentiate commits which have no connectors * attached to @encoder vs ones that do (and to inspect their state). This is * especially true in disable hooks because the pipeline will change. * * If you don't have access to the atomic state, see * drm_atomic_get_connector_for_encoder(). * * Returns: The new connector connected to @encoder, or NULL if the encoder is * not connected. 
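 *
 * A minimal sketch, e.g. from an encoder's atomic_enable() implementation:
 *
 *     struct drm_connector *connector;
 *
 *     connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
 *     if (!connector)
 *             return;
 *
 * where a NULL result means no connector will be using @encoder once the
 * commit is applied.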
*/ struct drm_connector * drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_connector_state *conn_state; struct drm_connector *connector; unsigned int i; for_each_new_connector_in_state(state, connector, conn_state, i) { if (conn_state->best_encoder == encoder) return connector; } return NULL; } EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder); /** * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder * @encoder: The encoder to find the connector of * @ctx: Modeset locking context * * This function finds and returns the connector currently assigned to * an @encoder. * * It is similar to the drm_atomic_get_old_connector_for_encoder() and * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't * require access to the atomic state. If you have access to it, prefer * using these. This helper is typically useful in situations where you * don't have access to the atomic state, like detect, link repair, * threaded interrupt handlers, or hooks from other frameworks (ALSA, * CEC, etc.). * * Returns: * The connector connected to @encoder, or an error pointer otherwise. * When the error is EDEADLK, a deadlock has been detected and the * sequence must be restarted. */ struct drm_connector * drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct drm_connector_list_iter conn_iter; struct drm_connector *out_connector = ERR_PTR(-EINVAL); struct drm_connector *connector; struct drm_device *dev = encoder->dev; int ret; ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); if (ret) return ERR_PTR(ret); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (!connector->state) continue; if (encoder == connector->state->best_encoder) { out_connector = connector; break; } } drm_connector_list_iter_end(&conn_iter); drm_modeset_unlock(&dev->mode_config.connection_mutex); return out_connector; } EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder); /** * drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder * @state: Atomic state * @encoder: The encoder to fetch the crtc state for * * This function finds and returns the crtc that was connected to @encoder * as specified by the @state. * * Returns: The old crtc connected to @encoder, or NULL if the encoder is * not connected. */ struct drm_crtc * drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_connector *connector; struct drm_connector_state *conn_state; connector = drm_atomic_get_old_connector_for_encoder(state, encoder); if (!connector) return NULL; conn_state = drm_atomic_get_old_connector_state(state, connector); if (!conn_state) return NULL; return conn_state->crtc; } EXPORT_SYMBOL(drm_atomic_get_old_crtc_for_encoder); /** * drm_atomic_get_new_crtc_for_encoder - Get new crtc for an encoder * @state: Atomic state * @encoder: The encoder to fetch the crtc state for * * This function finds and returns the crtc that will be connected to @encoder * as specified by the @state. * * Returns: The new crtc connected to @encoder, or NULL if the encoder is * not connected. 
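 *
 * A short sketch, mirroring the connector helpers above; new_crtc_state here
 * is a local variable of the hypothetical caller:
 *
 *     struct drm_crtc *crtc;
 *
 *     crtc = drm_atomic_get_new_crtc_for_encoder(state, encoder);
 *     if (crtc)
 *             new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);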
*/ struct drm_crtc * drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_connector *connector; struct drm_connector_state *conn_state; connector = drm_atomic_get_new_connector_for_encoder(state, encoder); if (!connector) return NULL; conn_state = drm_atomic_get_new_connector_state(state, connector); if (!conn_state) return NULL; return conn_state->crtc; } EXPORT_SYMBOL(drm_atomic_get_new_crtc_for_encoder); /** * drm_atomic_get_connector_state - get connector state * @state: global atomic state object * @connector: connector to get state object for * * This function returns the connector state for the given connector, * allocating it if needed. It will also grab the relevant connector lock to * make sure that the state is consistent. * * Returns: * Either the allocated state or the error code encoded into the pointer. When * the error is EDEADLK then the w/w mutex code has detected a deadlock and the * entire atomic sequence must be restarted. All other errors are fatal. */ struct drm_connector_state * drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector *connector) { int ret, index; struct drm_mode_config *config = &connector->dev->mode_config; struct drm_connector_state *connector_state; WARN_ON(!state->acquire_ctx); ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); if (ret) return ERR_PTR(ret); index = drm_connector_index(connector); if (index >= state->num_connector) { struct __drm_connnectors_state *c; int alloc = max(index + 1, config->num_connector); c = krealloc_array(state->connectors, alloc, sizeof(*state->connectors), GFP_KERNEL); if (!c) return ERR_PTR(-ENOMEM); state->connectors = c; memset(&state->connectors[state->num_connector], 0, sizeof(*state->connectors) * (alloc - state->num_connector)); state->num_connector = alloc; } if (state->connectors[index].state) return state->connectors[index].state; connector_state = connector->funcs->atomic_duplicate_state(connector); if (!connector_state) return ERR_PTR(-ENOMEM); drm_connector_get(connector); state->connectors[index].state = connector_state; state->connectors[index].old_state = connector->state; state->connectors[index].new_state = connector_state; state->connectors[index].ptr = connector; connector_state->state = state; drm_dbg_atomic(connector->dev, "Added [CONNECTOR:%d:%s] %p state to %p\n", connector->base.id, connector->name, connector_state, state); if (connector_state->crtc) { struct drm_crtc_state *crtc_state; crtc_state = drm_atomic_get_crtc_state(state, connector_state->crtc); if (IS_ERR(crtc_state)) return ERR_CAST(crtc_state); } return connector_state; } EXPORT_SYMBOL(drm_atomic_get_connector_state); static void drm_atomic_connector_print_state(struct drm_printer *p, const struct drm_connector_state *state) { struct drm_connector *connector = state->connector; drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? 
state->crtc->name : "(null)"); drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); drm_printf(p, "\tinterlace_allowed=%d\n", connector->interlace_allowed); drm_printf(p, "\tycbcr_420_allowed=%d\n", connector->ycbcr_420_allowed); drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc); drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace)); if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { drm_printf(p, "\tbroadcast_rgb=%s\n", drm_hdmi_connector_get_broadcast_rgb_name(state->hdmi.broadcast_rgb)); drm_printf(p, "\tis_limited_range=%c\n", state->hdmi.is_limited_range ? 'y' : 'n'); drm_printf(p, "\toutput_bpc=%u\n", state->hdmi.output_bpc); drm_printf(p, "\toutput_format=%s\n", drm_hdmi_connector_get_output_format_name(state->hdmi.output_format)); drm_printf(p, "\ttmds_char_rate=%llu\n", state->hdmi.tmds_char_rate); } if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) if (state->writeback_job && state->writeback_job->fb) drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id); if (connector->funcs->atomic_print_state) connector->funcs->atomic_print_state(p, state); } /** * drm_atomic_get_bridge_state - get bridge state * @state: global atomic state object * @bridge: bridge to get state object for * * This function returns the bridge state for the given bridge, allocating it * if needed. It will also grab the relevant bridge lock to make sure that the * state is consistent. * * Returns: * Either the allocated state or the error code encoded into the pointer. When * the error is EDEADLK then the w/w mutex code has detected a deadlock and the * entire atomic sequence must be restarted. */ struct drm_bridge_state * drm_atomic_get_bridge_state(struct drm_atomic_state *state, struct drm_bridge *bridge) { struct drm_private_state *obj_state; obj_state = drm_atomic_get_private_obj_state(state, &bridge->base); if (IS_ERR(obj_state)) return ERR_CAST(obj_state); return drm_priv_to_bridge_state(obj_state); } EXPORT_SYMBOL(drm_atomic_get_bridge_state); /** * drm_atomic_get_old_bridge_state - get old bridge state, if it exists * @state: global atomic state object * @bridge: bridge to grab * * This function returns the old bridge state for the given bridge, or NULL if * the bridge is not part of the global atomic state. */ struct drm_bridge_state * drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state, struct drm_bridge *bridge) { struct drm_private_state *obj_state; obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base); if (!obj_state) return NULL; return drm_priv_to_bridge_state(obj_state); } EXPORT_SYMBOL(drm_atomic_get_old_bridge_state); /** * drm_atomic_get_new_bridge_state - get new bridge state, if it exists * @state: global atomic state object * @bridge: bridge to grab * * This function returns the new bridge state for the given bridge, or NULL if * the bridge is not part of the global atomic state. */ struct drm_bridge_state * drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state, struct drm_bridge *bridge) { struct drm_private_state *obj_state; obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base); if (!obj_state) return NULL; return drm_priv_to_bridge_state(obj_state); } EXPORT_SYMBOL(drm_atomic_get_new_bridge_state); /** * drm_atomic_add_encoder_bridges - add bridges attached to an encoder * @state: atomic state * @encoder: DRM encoder * * This function adds all bridges attached to @encoder. 
This is needed to add * bridge states to @state and make them available when * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(), * &drm_bridge_funcs.atomic_enable(), * &drm_bridge_funcs.atomic_disable_post_disable() are called. * * Returns: * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK * then the w/w mutex code has detected a deadlock and the entire atomic * sequence must be restarted. All other errors are fatal. */ int drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_bridge_state *bridge_state; struct drm_bridge *bridge; if (!encoder) return 0; drm_dbg_atomic(encoder->dev, "Adding all bridges for [encoder:%d:%s] to %p\n", encoder->base.id, encoder->name, state); drm_for_each_bridge_in_chain(encoder, bridge) { /* Skip bridges that don't implement the atomic state hooks. */ if (!bridge->funcs->atomic_duplicate_state) continue; bridge_state = drm_atomic_get_bridge_state(state, bridge); if (IS_ERR(bridge_state)) return PTR_ERR(bridge_state); } return 0; } EXPORT_SYMBOL(drm_atomic_add_encoder_bridges); /** * drm_atomic_add_affected_connectors - add connectors for CRTC * @state: atomic state * @crtc: DRM CRTC * * This function walks the current configuration and adds all connectors * currently using @crtc to the atomic configuration @state. Note that this * function must acquire the connection mutex. This can potentially cause * unneeded serialization if the update is just for the planes on one CRTC. Hence * drivers and helpers should only call this when really needed (e.g. when a * full modeset needs to happen due to some change). * * Returns: * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK * then the w/w mutex code has detected a deadlock and the entire atomic * sequence must be restarted. All other errors are fatal. */ int drm_atomic_add_affected_connectors(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_mode_config *config = &state->dev->mode_config; struct drm_connector *connector; struct drm_connector_state *conn_state; struct drm_connector_list_iter conn_iter; struct drm_crtc_state *crtc_state; int ret; crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); if (ret) return ret; drm_dbg_atomic(crtc->dev, "Adding all current connectors for [CRTC:%d:%s] to %p\n", crtc->base.id, crtc->name, state); /* * Changed connectors are already in @state, so only need to look * at the connector_mask in crtc_state. */ drm_connector_list_iter_begin(state->dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (!(crtc_state->connector_mask & drm_connector_mask(connector))) continue; conn_state = drm_atomic_get_connector_state(state, connector); if (IS_ERR(conn_state)) { drm_connector_list_iter_end(&conn_iter); return PTR_ERR(conn_state); } } drm_connector_list_iter_end(&conn_iter); return 0; } EXPORT_SYMBOL(drm_atomic_add_affected_connectors); /** * drm_atomic_add_affected_planes - add planes for CRTC * @state: atomic state * @crtc: DRM CRTC * * This function walks the current configuration and adds all planes * currently used by @crtc to the atomic configuration @state. This is useful * when an atomic commit also needs to check all currently enabled plane on * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC * to avoid special code to force-enable all planes. 
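 *
 * A typical sketch from a driver's &drm_mode_config_funcs.atomic_check, when a
 * CRTC-wide change means all of its planes have to be re-checked:
 *
 *     if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
 *             ret = drm_atomic_add_affected_planes(state, crtc);
 *             if (ret)
 *                     return ret;
 *     }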
* * Since acquiring a plane state will always also acquire the w/w mutex of the * current CRTC for that plane (if there is any) adding all the plane states for * a CRTC will not reduce parallelism of atomic updates. * * Returns: * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK * then the w/w mutex code has detected a deadlock and the entire atomic * sequence must be restarted. All other errors are fatal. */ int drm_atomic_add_affected_planes(struct drm_atomic_state *state, struct drm_crtc *crtc) { const struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct drm_plane *plane; WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); drm_dbg_atomic(crtc->dev, "Adding all current planes for [CRTC:%d:%s] to %p\n", crtc->base.id, crtc->name, state); drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) { struct drm_plane_state *plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) return PTR_ERR(plane_state); } return 0; } EXPORT_SYMBOL(drm_atomic_add_affected_planes); /** * drm_atomic_check_only - check whether a given config would work * @state: atomic configuration to check * * Note that this function can return -EDEADLK if the driver needed to acquire * more locks but encountered a deadlock. The caller must then do the usual w/w * backoff dance and restart. All other errors are fatal. * * Returns: * 0 on success, negative error code on failure. */ int drm_atomic_check_only(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct drm_mode_config *config = &dev->mode_config; struct drm_plane *plane; struct drm_plane_state *old_plane_state; struct drm_plane_state *new_plane_state; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; struct drm_connector *conn; struct drm_connector_state *conn_state; unsigned int requested_crtc = 0; unsigned int affected_crtc = 0; int i, ret = 0; drm_dbg_atomic(dev, "checking %p\n", state); for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->enable) requested_crtc |= drm_crtc_mask(crtc); } for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { ret = drm_atomic_plane_check(old_plane_state, new_plane_state); if (ret) { drm_dbg_atomic(dev, "[PLANE:%d:%s] atomic core check failed\n", plane->base.id, plane->name); return ret; } } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state); if (ret) { drm_dbg_atomic(dev, "[CRTC:%d:%s] atomic core check failed\n", crtc->base.id, crtc->name); return ret; } } for_each_new_connector_in_state(state, conn, conn_state, i) { ret = drm_atomic_connector_check(conn, conn_state); if (ret) { drm_dbg_atomic(dev, "[CONNECTOR:%d:%s] atomic core check failed\n", conn->base.id, conn->name); return ret; } } if (config->funcs->atomic_check) { ret = config->funcs->atomic_check(state->dev, state); if (ret) { drm_dbg_atomic(dev, "atomic driver check for %p failed: %d\n", state, ret); return ret; } } if (!state->allow_modeset) { for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { drm_dbg_atomic(dev, "[CRTC:%d:%s] requires full modeset\n", crtc->base.id, crtc->name); return -EINVAL; } } } for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->enable) affected_crtc |= drm_crtc_mask(crtc); } /* * For commits that allow modesets drivers can add other 
CRTCs to the * atomic commit, e.g. when they need to reallocate global resources. * This can cause spurious EBUSY, which robs compositors of a very * effective sanity check for their drawing loop. Therefor only allow * drivers to add unrelated CRTC states for modeset commits. * * FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output * so compositors know what's going on. */ if (affected_crtc != requested_crtc) { drm_dbg_atomic(dev, "driver added CRTC to commit: requested 0x%x, affected 0x%0x\n", requested_crtc, affected_crtc); WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n", requested_crtc, affected_crtc); } return 0; } EXPORT_SYMBOL(drm_atomic_check_only); /** * drm_atomic_commit - commit configuration atomically * @state: atomic configuration to check * * Note that this function can return -EDEADLK if the driver needed to acquire * more locks but encountered a deadlock. The caller must then do the usual w/w * backoff dance and restart. All other errors are fatal. * * This function will take its own reference on @state. * Callers should always release their reference with drm_atomic_state_put(). * * Returns: * 0 on success, negative error code on failure. */ int drm_atomic_commit(struct drm_atomic_state *state) { struct drm_mode_config *config = &state->dev->mode_config; struct drm_printer p = drm_info_printer(state->dev->dev); int ret; if (drm_debug_enabled(DRM_UT_STATE)) drm_atomic_print_new_state(state, &p); ret = drm_atomic_check_only(state); if (ret) return ret; drm_dbg_atomic(state->dev, "committing %p\n", state); return config->funcs->atomic_commit(state->dev, state, false); } EXPORT_SYMBOL(drm_atomic_commit); /** * drm_atomic_nonblocking_commit - atomic nonblocking commit * @state: atomic configuration to check * * Note that this function can return -EDEADLK if the driver needed to acquire * more locks but encountered a deadlock. The caller must then do the usual w/w * backoff dance and restart. All other errors are fatal. * * This function will take its own reference on @state. * Callers should always release their reference with drm_atomic_state_put(). * * Returns: * 0 on success, negative error code on failure. 
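 *
 * A minimal sketch; unlike drm_atomic_commit() this queues the commit and
 * returns without waiting for it to reach the hardware:
 *
 *     ret = drm_atomic_nonblocking_commit(state);
 *     drm_atomic_state_put(state);
 *     return ret;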
*/ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) { struct drm_mode_config *config = &state->dev->mode_config; int ret; ret = drm_atomic_check_only(state); if (ret) return ret; drm_dbg_atomic(state->dev, "committing %p nonblocking\n", state); return config->funcs->atomic_commit(state->dev, state, true); } EXPORT_SYMBOL(drm_atomic_nonblocking_commit); /* just used from drm-client and atomic-helper: */ int __drm_atomic_helper_disable_plane(struct drm_plane *plane, struct drm_plane_state *plane_state) { int ret; ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); if (ret != 0) return ret; drm_atomic_set_fb_for_plane(plane_state, NULL); plane_state->crtc_x = 0; plane_state->crtc_y = 0; plane_state->crtc_w = 0; plane_state->crtc_h = 0; plane_state->src_x = 0; plane_state->src_y = 0; plane_state->src_w = 0; plane_state->src_h = 0; return 0; } EXPORT_SYMBOL(__drm_atomic_helper_disable_plane); static int update_output_state(struct drm_atomic_state *state, struct drm_mode_set *set) { struct drm_device *dev = set->crtc->dev; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; struct drm_connector *connector; struct drm_connector_state *new_conn_state; int ret, i; ret = drm_modeset_lock(&dev->mode_config.connection_mutex, state->acquire_ctx); if (ret) return ret; /* First disable all connectors on the target crtc. */ ret = drm_atomic_add_affected_connectors(state, set->crtc); if (ret) return ret; for_each_new_connector_in_state(state, connector, new_conn_state, i) { if (new_conn_state->crtc == set->crtc) { ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL); if (ret) return ret; /* Make sure legacy setCrtc always re-trains */ new_conn_state->link_status = DRM_LINK_STATUS_GOOD; } } /* Then set all connectors from set->connectors on the target crtc */ for (i = 0; i < set->num_connectors; i++) { new_conn_state = drm_atomic_get_connector_state(state, set->connectors[i]); if (IS_ERR(new_conn_state)) return PTR_ERR(new_conn_state); ret = drm_atomic_set_crtc_for_connector(new_conn_state, set->crtc); if (ret) return ret; } for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { /* * Don't update ->enable for the CRTC in the set_config request, * since a mismatch would indicate a bug in the upper layers. * The actual modeset code later on will catch any * inconsistencies here. 
*/ if (crtc == set->crtc) continue; if (!new_crtc_state->connector_mask) { ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state, NULL); if (ret < 0) return ret; new_crtc_state->active = false; } } return 0; } /* just used from drm-client and atomic-helper: */ int __drm_atomic_helper_set_config(struct drm_mode_set *set, struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state; struct drm_plane_state *primary_state; struct drm_crtc *crtc = set->crtc; int hdisplay, vdisplay; int ret; crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); primary_state = drm_atomic_get_plane_state(state, crtc->primary); if (IS_ERR(primary_state)) return PTR_ERR(primary_state); if (!set->mode) { WARN_ON(set->fb); WARN_ON(set->num_connectors); ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL); if (ret != 0) return ret; crtc_state->active = false; ret = drm_atomic_set_crtc_for_plane(primary_state, NULL); if (ret != 0) return ret; drm_atomic_set_fb_for_plane(primary_state, NULL); goto commit; } WARN_ON(!set->fb); WARN_ON(!set->num_connectors); ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode); if (ret != 0) return ret; crtc_state->active = true; ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); if (ret != 0) return ret; drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay); drm_atomic_set_fb_for_plane(primary_state, set->fb); primary_state->crtc_x = 0; primary_state->crtc_y = 0; primary_state->crtc_w = hdisplay; primary_state->crtc_h = vdisplay; primary_state->src_x = set->x << 16; primary_state->src_y = set->y << 16; if (drm_rotation_90_or_270(primary_state->rotation)) { primary_state->src_w = vdisplay << 16; primary_state->src_h = hdisplay << 16; } else { primary_state->src_w = hdisplay << 16; primary_state->src_h = vdisplay << 16; } commit: ret = update_output_state(state, set); if (ret) return ret; return 0; } EXPORT_SYMBOL(__drm_atomic_helper_set_config); static void drm_atomic_private_obj_print_state(struct drm_printer *p, const struct drm_private_state *state) { struct drm_private_obj *obj = state->obj; if (obj->funcs->atomic_print_state) obj->funcs->atomic_print_state(p, state); } /** * drm_atomic_print_new_state - prints drm atomic state * @state: atomic configuration to check * @p: drm printer * * This functions prints the drm atomic state snapshot using the drm printer * which is passed to it. This snapshot can be used for debugging purposes. * * Note that this function looks into the new state objects and hence its not * safe to be used after the call to drm_atomic_helper_commit_hw_done(). 
*/ void drm_atomic_print_new_state(const struct drm_atomic_state *state, struct drm_printer *p) { struct drm_plane *plane; struct drm_plane_state *plane_state; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; struct drm_connector *connector; struct drm_connector_state *connector_state; struct drm_private_obj *obj; struct drm_private_state *obj_state; int i; if (!p) { drm_err(state->dev, "invalid drm printer\n"); return; } drm_dbg_atomic(state->dev, "checking %p\n", state); for_each_new_plane_in_state(state, plane, plane_state, i) drm_atomic_plane_print_state(p, plane_state); for_each_new_crtc_in_state(state, crtc, crtc_state, i) drm_atomic_crtc_print_state(p, crtc_state); for_each_new_connector_in_state(state, connector, connector_state, i) drm_atomic_connector_print_state(p, connector_state); for_each_new_private_obj_in_state(state, obj, obj_state, i) drm_atomic_private_obj_print_state(p, obj_state); } EXPORT_SYMBOL(drm_atomic_print_new_state); static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, bool take_locks) { struct drm_mode_config *config = &dev->mode_config; struct drm_plane *plane; struct drm_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_private_obj *obj; if (!drm_drv_uses_atomic_modeset(dev)) return; list_for_each_entry(plane, &config->plane_list, head) { if (take_locks) drm_modeset_lock(&plane->mutex, NULL); drm_atomic_plane_print_state(p, plane->state); if (take_locks) drm_modeset_unlock(&plane->mutex); } list_for_each_entry(crtc, &config->crtc_list, head) { if (take_locks) drm_modeset_lock(&crtc->mutex, NULL); drm_atomic_crtc_print_state(p, crtc->state); if (take_locks) drm_modeset_unlock(&crtc->mutex); } drm_connector_list_iter_begin(dev, &conn_iter); if (take_locks) drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); drm_for_each_connector_iter(connector, &conn_iter) drm_atomic_connector_print_state(p, connector->state); if (take_locks) drm_modeset_unlock(&dev->mode_config.connection_mutex); drm_connector_list_iter_end(&conn_iter); list_for_each_entry(obj, &config->privobj_list, head) { if (take_locks) drm_modeset_lock(&obj->lock, NULL); drm_atomic_private_obj_print_state(p, obj->state); if (take_locks) drm_modeset_unlock(&obj->lock); } } /** * drm_state_dump - dump entire device atomic state * @dev: the drm device * @p: where to print the state to * * Just for debugging. Drivers might want an option to dump state * to dmesg in case of error irq's. (Hint, you probably want to * ratelimit this!) * * The caller must wrap this drm_modeset_lock_all_ctx() and * drm_modeset_drop_locks(). If this is called from error irq handler, it should * not be enabled by default - if you are debugging errors you might * not care that this is racey, but calling this without all modeset locks held * is inherently unsafe. */ void drm_state_dump(struct drm_device *dev, struct drm_printer *p) { __drm_state_dump(dev, p, false); } EXPORT_SYMBOL(drm_state_dump); #ifdef CONFIG_DEBUG_FS static int drm_state_info(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; struct drm_device *dev = entry->dev; struct drm_printer p = drm_seq_file_printer(m); __drm_state_dump(dev, &p, true); return 0; } /* any use in debugfs files to dump individual planes/crtc/etc? 
 */
static const struct drm_debugfs_info drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

void drm_atomic_debugfs_init(struct drm_device *dev)
{
	drm_debugfs_add_files(dev, drm_atomic_debugfs_list,
			      ARRAY_SIZE(drm_atomic_debugfs_list));
}
#endif
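/*
 * Editor's note: the kernel-doc above repeatedly refers to "the usual w/w
 * backoff dance" for -EDEADLK.  A minimal sketch of that retry loop is
 * given below, assuming a driver-side caller that already owns @dev; the
 * code that actually populates @state is elided and the function name is
 * hypothetical.  This is an illustration, not part of drm_atomic.c.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>
#include <linux/errno.h>

static int example_commit_with_backoff(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_locks;
	}
	state->acquire_ctx = &ctx;

retry:
	/* ... add the planes/CRTCs/connectors to be changed to @state ... */

	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		/* w/w mutex code detected a deadlock: drop, back off, retry */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);
out_locks:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}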
| 20 20 10 7 3 8 10 10 16 8 15 7 4 6 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 | // SPDX-License-Identifier: GPL-2.0-only /* * vMTRR implementation * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * Copyright(C) 2015 Intel Corporation. * * Authors: * Yaniv Kamay <yaniv@qumranet.com> * Avi Kivity <avi@qumranet.com> * Marcelo Tosatti <mtosatti@redhat.com> * Paolo Bonzini <pbonzini@redhat.com> * Xiao Guangrong <guangrong.xiao@linux.intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kvm_host.h> #include <asm/mtrr.h> #include "cpuid.h" #include "x86.h" static u64 *find_mtrr(struct kvm_vcpu *vcpu, unsigned int msr) { int index; switch (msr) { case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1): index = msr - MTRRphysBase_MSR(0); return &vcpu->arch.mtrr_state.var[index]; case MSR_MTRRfix64K_00000: return &vcpu->arch.mtrr_state.fixed_64k; case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: index = msr - MSR_MTRRfix16K_80000; return &vcpu->arch.mtrr_state.fixed_16k[index]; case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: index = msr - MSR_MTRRfix4K_C0000; return &vcpu->arch.mtrr_state.fixed_4k[index]; case MSR_MTRRdefType: return &vcpu->arch.mtrr_state.deftype; default: break; } return NULL; } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; u64 mask; if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ if (WARN_ON_ONCE(!(msr >= MTRRphysBase_MSR(0) && msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1)))) return false; mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu); if ((msr & 1) == 0) { /* MTRR base */ if (!valid_mtrr_type(data & 0xff)) return false; mask |= 0xf00; } else { /* MTRR mask */ mask |= 0x7ff; } return (data & mask) == 0; } int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *mtrr; mtrr = find_mtrr(vcpu, msr); if (!mtrr) return 1; if (!kvm_mtrr_valid(vcpu, msr, data)) return 1; *mtrr = data; return 0; } int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *mtrr; /* MSR_MTRRcap is a readonly MSR. */ if (msr == MSR_MTRRcap) { /* * SMRR = 0 * WC = 1 * FIX = 1 * VCNT = KVM_NR_VAR_MTRR */ *pdata = 0x500 | KVM_NR_VAR_MTRR; return 0; } mtrr = find_mtrr(vcpu, msr); if (!mtrr) return 1; *pdata = *mtrr; return 0; } |
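/*
 * Editor's note: valid_mtrr_type() above packs the architecturally valid
 * MTRR memory types into one mask: (1 << t) & 0x73 is non-zero only for
 * types 0 (UC), 1 (WC), 4 (WT), 5 (WP) and 6 (WB).  The stand-alone,
 * hypothetical user-space check below just restates that trick.
 */
#include <assert.h>
#include <stdbool.h>

static bool valid_mtrr_type(unsigned int t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

int main(void)
{
	assert(valid_mtrr_type(0));	/* UC */
	assert(valid_mtrr_type(1));	/* WC */
	assert(!valid_mtrr_type(2));	/* reserved */
	assert(!valid_mtrr_type(3));	/* reserved */
	assert(valid_mtrr_type(4));	/* WT */
	assert(valid_mtrr_type(5));	/* WP */
	assert(valid_mtrr_type(6));	/* WB */
	assert(!valid_mtrr_type(7));	/* reserved */
	assert(!valid_mtrr_type(8));	/* out of range */
	return 0;
}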
| 1 403 188 9511 8836 1155 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM sock #if !defined(_TRACE_SOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SOCK_H #include <net/sock.h> #include <net/ipv6.h> #include <linux/tracepoint.h> #include <linux/ipv6.h> #include <linux/tcp.h> #include <trace/events/net_probe_common.h> #define family_names \ EM(AF_INET) \ EMe(AF_INET6) /* The protocol traced by inet_sock_set_state */ #define inet_protocol_names \ EM(IPPROTO_TCP) \ EM(IPPROTO_SCTP) \ EMe(IPPROTO_MPTCP) #define tcp_state_names \ EM(TCP_ESTABLISHED) \ EM(TCP_SYN_SENT) \ EM(TCP_SYN_RECV) \ EM(TCP_FIN_WAIT1) \ EM(TCP_FIN_WAIT2) \ EM(TCP_TIME_WAIT) \ EM(TCP_CLOSE) \ EM(TCP_CLOSE_WAIT) \ EM(TCP_LAST_ACK) \ EM(TCP_LISTEN) \ EM(TCP_CLOSING) \ EMe(TCP_NEW_SYN_RECV) #define skmem_kind_names \ EM(SK_MEM_SEND) \ EMe(SK_MEM_RECV) /* enums need to be exported to user space */ #undef EM #undef EMe #define EM(a) TRACE_DEFINE_ENUM(a); #define EMe(a) TRACE_DEFINE_ENUM(a); family_names inet_protocol_names tcp_state_names skmem_kind_names #undef EM #undef EMe #define EM(a) { a, #a }, #define EMe(a) { a, #a } #define show_family_name(val) \ __print_symbolic(val, family_names) #define show_inet_protocol_name(val) \ __print_symbolic(val, inet_protocol_names) #define show_tcp_state_name(val) \ __print_symbolic(val, tcp_state_names) #define show_skmem_kind_names(val) \ __print_symbolic(val, skmem_kind_names) TRACE_EVENT(sock_rcvqueue_full, TP_PROTO(struct sock *sk, struct sk_buff *skb), TP_ARGS(sk, skb), TP_STRUCT__entry( __field(int, rmem_alloc) __field(unsigned int, truesize) __field(int, sk_rcvbuf) ), TP_fast_assign( __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); __entry->truesize = skb->truesize; __entry->sk_rcvbuf = READ_ONCE(sk->sk_rcvbuf); ), TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d", __entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf) ); TRACE_EVENT(sock_exceed_buf_limit, TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind), TP_ARGS(sk, prot, allocated, kind), TP_STRUCT__entry( __array(char, name, 32) __array(long, sysctl_mem, 3) __field(long, allocated) __field(int, sysctl_rmem) __field(int, rmem_alloc) __field(int, sysctl_wmem) __field(int, wmem_alloc) __field(int, wmem_queued) __field(int, kind) ), TP_fast_assign( strscpy(__entry->name, prot->name, 32); __entry->sysctl_mem[0] = 
READ_ONCE(prot->sysctl_mem[0]); __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]); __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]); __entry->allocated = allocated; __entry->sysctl_rmem = sk_get_rmem0(sk, prot); __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); __entry->sysctl_wmem = sk_get_wmem0(sk, prot); __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc); __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued); __entry->kind = kind; ), TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s", __entry->name, __entry->sysctl_mem[0], __entry->sysctl_mem[1], __entry->sysctl_mem[2], __entry->allocated, __entry->sysctl_rmem, __entry->rmem_alloc, __entry->sysctl_wmem, __entry->wmem_alloc, __entry->wmem_queued, show_skmem_kind_names(__entry->kind) ) ); TRACE_EVENT(inet_sock_set_state, TP_PROTO(const struct sock *sk, const int oldstate, const int newstate), TP_ARGS(sk, oldstate, newstate), TP_STRUCT__entry( __field(const void *, skaddr) __field(int, oldstate) __field(int, newstate) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __field(__u16, protocol) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) __array(__u8, daddr_v6, 16) ), TP_fast_assign( const struct inet_sock *inet = inet_sk(sk); __be32 *p32; __entry->skaddr = sk; __entry->oldstate = oldstate; __entry->newstate = newstate; __entry->family = sk->sk_family; __entry->protocol = sk->sk_protocol; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", show_family_name(__entry->family), show_inet_protocol_name(__entry->protocol), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, show_tcp_state_name(__entry->oldstate), show_tcp_state_name(__entry->newstate)) ); TRACE_EVENT(inet_sk_error_report, TP_PROTO(const struct sock *sk), TP_ARGS(sk), TP_STRUCT__entry( __field(int, error) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __field(__u16, protocol) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) __array(__u8, daddr_v6, 16) ), TP_fast_assign( const struct inet_sock *inet = inet_sk(sk); __be32 *p32; __entry->error = sk->sk_err; __entry->family = sk->sk_family; __entry->protocol = sk->sk_protocol; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d", show_family_name(__entry->family), show_inet_protocol_name(__entry->protocol), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, __entry->error) ); TRACE_EVENT(sk_data_ready, TP_PROTO(const struct sock *sk), TP_ARGS(sk), TP_STRUCT__entry( __field(const void *, skaddr) __field(__u16, family) __field(__u16, protocol) __field(unsigned long, ip) ), TP_fast_assign( __entry->skaddr = sk; 
__entry->family = sk->sk_family; __entry->protocol = sk->sk_protocol; __entry->ip = _RET_IP_; ), TP_printk("family=%u protocol=%u func=%ps", __entry->family, __entry->protocol, (void *)__entry->ip) ); /* * sock send/recv msg length */ DECLARE_EVENT_CLASS(sock_msg_length, TP_PROTO(struct sock *sk, int ret, int flags), TP_ARGS(sk, ret, flags), TP_STRUCT__entry( __field(void *, sk) __field(__u16, family) __field(__u16, protocol) __field(int, ret) __field(int, flags) ), TP_fast_assign( __entry->sk = sk; __entry->family = sk->sk_family; __entry->protocol = sk->sk_protocol; __entry->ret = ret; __entry->flags = flags; ), TP_printk("sk address = %p, family = %s protocol = %s, length = %d, error = %d, flags = 0x%x", __entry->sk, show_family_name(__entry->family), show_inet_protocol_name(__entry->protocol), !(__entry->flags & MSG_PEEK) ? (__entry->ret > 0 ? __entry->ret : 0) : 0, __entry->ret < 0 ? __entry->ret : 0, __entry->flags) ); DEFINE_EVENT(sock_msg_length, sock_send_length, TP_PROTO(struct sock *sk, int ret, int flags), TP_ARGS(sk, ret, flags) ); DEFINE_EVENT(sock_msg_length, sock_recv_length, TP_PROTO(struct sock *sk, int ret, int flags), TP_ARGS(sk, ret, flags) ); #endif /* _TRACE_SOCK_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
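/*
 * Editor's note: the sock_msg_length class above reports both a length and
 * an error from a single ret/flags pair.  The hypothetical user-space
 * helpers below restate the exact expressions used in its TP_printk():
 * MSG_PEEK reads count as length 0, and negative returns are reported as
 * the error rather than as a length.
 */
#include <stdio.h>
#include <sys/socket.h>		/* MSG_PEEK */

static int traced_length(int ret, int flags)
{
	if (flags & MSG_PEEK)
		return 0;
	return ret > 0 ? ret : 0;
}

static int traced_error(int ret)
{
	return ret < 0 ? ret : 0;
}

int main(void)
{
	printf("len=%d err=%d\n", traced_length(128, 0), traced_error(128));		/* 128, 0 */
	printf("len=%d err=%d\n", traced_length(128, MSG_PEEK), traced_error(128));	/* 0, 0 */
	printf("len=%d err=%d\n", traced_length(-11, 0), traced_error(-11));		/* 0, -11 */
	return 0;
}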
#ifndef _CRYPTO_GCM_H
#define _CRYPTO_GCM_H

#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/gf128mul.h>

#define GCM_AES_IV_SIZE 12
#define GCM_RFC4106_IV_SIZE 8
#define GCM_RFC4543_IV_SIZE 8

/*
 * validate the authentication tag size for GCM
 */
static inline int crypto_gcm_check_authsize(unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * validate the authentication tag size for RFC4106
 */
static inline int crypto_rfc4106_check_authsize(unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * validate assoclen for RFC4106/RFC4543
 */
static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
{
	switch (assoclen) {
	case 16:
	case 20:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

struct aesgcm_ctx {
	be128			ghash_key;
	struct crypto_aes_ctx	aes_ctx;
	unsigned int		authsize;
};

int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
		     unsigned int keysize, unsigned int authsize);

void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
		    int crypt_len, const u8 *assoc, int assoc_len,
		    const u8 iv[GCM_AES_IV_SIZE], u8 *authtag);

bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
				 const u8 *src, int crypt_len, const u8 *assoc,
				 int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
				 const u8 *authtag);

#endif
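/*
 * Editor's note: a minimal encrypt-then-decrypt round trip for the
 * aesgcm_* library interface declared above.  The key, IV, buffer sizes
 * and the 16-byte tag are illustrative assumptions, the function name is
 * hypothetical, and the kernel must provide the AES-GCM library
 * (CRYPTO_LIB_AESGCM) for these symbols to be available.
 */
#include <crypto/gcm.h>
#include <linux/errno.h>
#include <linux/string.h>

static int example_aesgcm_roundtrip(void)
{
	static const u8 key[16];			/* all-zero test key */
	static const u8 iv[GCM_AES_IV_SIZE];		/* all-zero test IV */
	static const u8 assoc[8] = "header";
	u8 msg[32] = "hello, aesgcm";
	u8 ct[32], pt[32], tag[16];
	struct aesgcm_ctx ctx;
	int err;

	err = aesgcm_expandkey(&ctx, key, sizeof(key), sizeof(tag));
	if (err)
		return err;

	aesgcm_encrypt(&ctx, ct, msg, sizeof(msg), assoc, sizeof(assoc),
		       iv, tag);

	/* aesgcm_decrypt() returns false if the authentication tag is bad */
	if (!aesgcm_decrypt(&ctx, pt, ct, sizeof(ct), assoc, sizeof(assoc),
			    iv, tag))
		return -EBADMSG;

	return memcmp(pt, msg, sizeof(msg)) ? -EINVAL : 0;
}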
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID driver for some monterey "special" devices
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2007 Jiri Kosina
 *  Copyright (c) 2008 Jiri Slaby
 */

/*
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

static const __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				   unsigned int *rsize)
{
	if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
		hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
		rdesc[30] = 0x0c;
	}
	return rdesc;
}

#define mr_map_key_clear(c)	hid_map_usage_clear(hi, usage, bit, max, \
						EV_KEY, (c))

static int mr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
		return 0;

	switch (usage->hid & HID_USAGE) {
	case 0x156: mr_map_key_clear(KEY_WORDPROCESSOR);	break;
	case 0x157: mr_map_key_clear(KEY_SPREADSHEET);		break;
	case 0x158: mr_map_key_clear(KEY_PRESENTATION);		break;
	case 0x15c: mr_map_key_clear(KEY_STOP);			break;
	default:
		return 0;
	}
	return 1;
}

static const struct hid_device_id mr_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
	{ }
};
MODULE_DEVICE_TABLE(hid, mr_devices);

static struct hid_driver mr_driver = {
	.name = "monterey",
	.id_table = mr_devices,
	.report_fixup = mr_report_fixup,
	.input_mapping = mr_input_mapping,
};
module_hid_driver(mr_driver);

MODULE_DESCRIPTION("HID driver for some monterey \"special\" devices");
MODULE_LICENSE("GPL");
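/*
 * Editor's note: the two bytes that mr_report_fixup() above patches are a
 * HID "Usage Page" item -- 0x05 0x09 is Usage Page (Button), and rewriting
 * the data byte to 0x0c turns it into Usage Page (Consumer), which is what
 * the consumer-page mappings in mr_input_mapping() expect.  The arrays
 * below are a hypothetical before/after excerpt around descriptor offset 29.
 */
static const __u8 kb29e_desc_before[] = {
	/* ... */ 0x05, 0x09,	/* Usage Page (Button) -- as reported by the keyboard */
};

static const __u8 kb29e_desc_after[] = {
	/* ... */ 0x05, 0x0c,	/* Usage Page (Consumer) -- after rdesc[30] = 0x0c */
};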
899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 | // SPDX-License-Identifier: GPL-2.0-only /* * Minimal file system backend for holding eBPF maps and programs, * used by bpf(2) object pinning. * * Authors: * * Daniel Borkmann <daniel@iogearbox.net> */ #include <linux/init.h> #include <linux/magic.h> #include <linux/major.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/fs.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/kdev_t.h> #include <linux/filter.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/kstrtox.h> #include "preload/bpf_preload.h" enum bpf_type { BPF_TYPE_UNSPEC = 0, BPF_TYPE_PROG, BPF_TYPE_MAP, BPF_TYPE_LINK, }; static void *bpf_any_get(void *raw, enum bpf_type type) { switch (type) { case BPF_TYPE_PROG: bpf_prog_inc(raw); break; case BPF_TYPE_MAP: bpf_map_inc_with_uref(raw); break; case BPF_TYPE_LINK: bpf_link_inc(raw); break; default: WARN_ON_ONCE(1); break; } return raw; } static void bpf_any_put(void *raw, enum bpf_type type) { switch (type) { case BPF_TYPE_PROG: bpf_prog_put(raw); break; case BPF_TYPE_MAP: bpf_map_put_with_uref(raw); break; case BPF_TYPE_LINK: bpf_link_put(raw); break; default: WARN_ON_ONCE(1); break; } } static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type) { void *raw; raw = bpf_map_get_with_uref(ufd); if (!IS_ERR(raw)) { *type = BPF_TYPE_MAP; return raw; } raw = bpf_prog_get(ufd); if (!IS_ERR(raw)) { *type = BPF_TYPE_PROG; return raw; } raw = bpf_link_get_from_fd(ufd); if (!IS_ERR(raw)) { *type = BPF_TYPE_LINK; return raw; } return ERR_PTR(-EINVAL); } static const struct inode_operations bpf_dir_iops; static const struct inode_operations bpf_prog_iops = { }; static const struct inode_operations bpf_map_iops = { }; static const struct inode_operations bpf_link_iops = { }; struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode) { struct inode *inode; switch (mode & S_IFMT) { case S_IFDIR: case S_IFREG: case S_IFLNK: break; default: return ERR_PTR(-EINVAL); } inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOSPC); inode->i_ino = get_next_ino(); simple_inode_init_ts(inode); inode_init_owner(&nop_mnt_idmap, inode, dir, mode); return inode; } static int bpf_inode_type(const struct inode *inode, enum bpf_type *type) { *type = BPF_TYPE_UNSPEC; if (inode->i_op == &bpf_prog_iops) *type = BPF_TYPE_PROG; else if (inode->i_op == &bpf_map_iops) *type = BPF_TYPE_MAP; else if (inode->i_op == &bpf_link_iops) *type = BPF_TYPE_LINK; else return -EACCES; return 0; } static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode, struct inode *dir) { 
d_instantiate(dentry, inode); dget(dentry); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); } static struct dentry *bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR); if (IS_ERR(inode)) return ERR_CAST(inode); inode->i_op = &bpf_dir_iops; inode->i_fop = &simple_dir_operations; inc_nlink(inode); inc_nlink(dir); bpf_dentry_finalize(dentry, inode, dir); return NULL; } struct map_iter { void *key; bool done; }; static struct map_iter *map_iter(struct seq_file *m) { return m->private; } static struct bpf_map *seq_file_to_map(struct seq_file *m) { return file_inode(m->file)->i_private; } static void map_iter_free(struct map_iter *iter) { if (iter) { kfree(iter->key); kfree(iter); } } static struct map_iter *map_iter_alloc(struct bpf_map *map) { struct map_iter *iter; iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN); if (!iter) goto error; iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN); if (!iter->key) goto error; return iter; error: map_iter_free(iter); return NULL; } static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) { struct bpf_map *map = seq_file_to_map(m); void *key = map_iter(m)->key; void *prev_key; (*pos)++; if (map_iter(m)->done) return NULL; if (unlikely(v == SEQ_START_TOKEN)) prev_key = NULL; else prev_key = key; rcu_read_lock(); if (map->ops->map_get_next_key(map, prev_key, key)) { map_iter(m)->done = true; key = NULL; } rcu_read_unlock(); return key; } static void *map_seq_start(struct seq_file *m, loff_t *pos) { if (map_iter(m)->done) return NULL; return *pos ? map_iter(m)->key : SEQ_START_TOKEN; } static void map_seq_stop(struct seq_file *m, void *v) { } static int map_seq_show(struct seq_file *m, void *v) { struct bpf_map *map = seq_file_to_map(m); void *key = map_iter(m)->key; if (unlikely(v == SEQ_START_TOKEN)) { seq_puts(m, "# WARNING!! The output is for debug purpose only\n"); seq_puts(m, "# WARNING!! The output format will change\n"); } else { map->ops->map_seq_show_elem(map, key, m); } return 0; } static const struct seq_operations bpffs_map_seq_ops = { .start = map_seq_start, .next = map_seq_next, .show = map_seq_show, .stop = map_seq_stop, }; static int bpffs_map_open(struct inode *inode, struct file *file) { struct bpf_map *map = inode->i_private; struct map_iter *iter; struct seq_file *m; int err; iter = map_iter_alloc(map); if (!iter) return -ENOMEM; err = seq_open(file, &bpffs_map_seq_ops); if (err) { map_iter_free(iter); return err; } m = file->private_data; m->private = iter; return 0; } static int bpffs_map_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; map_iter_free(map_iter(m)); return seq_release(inode, file); } /* bpffs_map_fops should only implement the basic * read operation for a BPF map. The purpose is to * provide a simple user intuitive way to do * "cat bpffs/pathto/a-pinned-map". * * Other operations (e.g. write, lookup...) should be realized by * the userspace tools (e.g. bpftool) through the * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update * interface. 
*/ static const struct file_operations bpffs_map_fops = { .open = bpffs_map_open, .read = seq_read, .release = bpffs_map_release, }; static int bpffs_obj_open(struct inode *inode, struct file *file) { return -EIO; } static const struct file_operations bpffs_obj_fops = { .open = bpffs_obj_open, }; static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw, const struct inode_operations *iops, const struct file_operations *fops) { struct inode *dir = dentry->d_parent->d_inode; struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = iops; inode->i_fop = fops; inode->i_private = raw; bpf_dentry_finalize(dentry, inode, dir); return 0; } static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg) { return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops, &bpffs_obj_fops); } static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg) { struct bpf_map *map = arg; return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops, bpf_map_support_seq_show(map) ? &bpffs_map_fops : &bpffs_obj_fops); } static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg) { struct bpf_link *link = arg; return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops, bpf_link_is_iter(link) ? &bpf_iter_fops : &bpffs_obj_fops); } static struct dentry * bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags) { /* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future * extensions. That allows popoulate_bpffs() create special files. */ if ((dir->i_mode & S_IALLUGO) && strchr(dentry->d_name.name, '.')) return ERR_PTR(-EPERM); return simple_lookup(dir, dentry, flags); } static int bpf_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *target) { char *link = kstrdup(target, GFP_USER | __GFP_NOWARN); struct inode *inode; if (!link) return -ENOMEM; inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK); if (IS_ERR(inode)) { kfree(link); return PTR_ERR(inode); } inode->i_op = &simple_symlink_inode_operations; inode->i_link = link; bpf_dentry_finalize(dentry, inode, dir); return 0; } static const struct inode_operations bpf_dir_iops = { .lookup = bpf_lookup, .mkdir = bpf_mkdir, .symlink = bpf_symlink, .rmdir = simple_rmdir, .rename = simple_rename, .link = simple_link, .unlink = simple_unlink, }; /* pin iterator link into bpffs */ static int bpf_iter_link_pin_kernel(struct dentry *parent, const char *name, struct bpf_link *link) { umode_t mode = S_IFREG | S_IRUSR; struct dentry *dentry; int ret; inode_lock(parent->d_inode); dentry = lookup_noperm(&QSTR(name), parent); if (IS_ERR(dentry)) { inode_unlock(parent->d_inode); return PTR_ERR(dentry); } ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops, &bpf_iter_fops); dput(dentry); inode_unlock(parent->d_inode); return ret; } static int bpf_obj_do_pin(int path_fd, const char __user *pathname, void *raw, enum bpf_type type) { struct dentry *dentry; struct inode *dir; struct path path; umode_t mode; int ret; dentry = user_path_create(path_fd, pathname, &path, 0); if (IS_ERR(dentry)) return PTR_ERR(dentry); dir = d_inode(path.dentry); if (dir->i_op != &bpf_dir_iops) { ret = -EPERM; goto out; } mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask()); ret = security_path_mknod(&path, dentry, mode, 0); if (ret) goto out; switch (type) { case BPF_TYPE_PROG: ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw); break; case BPF_TYPE_MAP: ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw); break; case BPF_TYPE_LINK: ret = vfs_mkobj(dentry, mode, 
bpf_mklink, raw); break; default: ret = -EPERM; } out: done_path_create(&path, dentry); return ret; } int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname) { enum bpf_type type; void *raw; int ret; raw = bpf_fd_probe_obj(ufd, &type); if (IS_ERR(raw)) return PTR_ERR(raw); ret = bpf_obj_do_pin(path_fd, pathname, raw, type); if (ret != 0) bpf_any_put(raw, type); return ret; } static void *bpf_obj_do_get(int path_fd, const char __user *pathname, enum bpf_type *type, int flags) { struct inode *inode; struct path path; void *raw; int ret; ret = user_path_at(path_fd, pathname, LOOKUP_FOLLOW, &path); if (ret) return ERR_PTR(ret); inode = d_backing_inode(path.dentry); ret = path_permission(&path, ACC_MODE(flags)); if (ret) goto out; ret = bpf_inode_type(inode, type); if (ret) goto out; raw = bpf_any_get(inode->i_private, *type); if (!IS_ERR(raw)) touch_atime(&path); path_put(&path); return raw; out: path_put(&path); return ERR_PTR(ret); } int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags) { enum bpf_type type = BPF_TYPE_UNSPEC; int f_flags; void *raw; int ret; f_flags = bpf_get_file_flag(flags); if (f_flags < 0) return f_flags; raw = bpf_obj_do_get(path_fd, pathname, &type, f_flags); if (IS_ERR(raw)) return PTR_ERR(raw); if (type == BPF_TYPE_PROG) ret = bpf_prog_new_fd(raw); else if (type == BPF_TYPE_MAP) ret = bpf_map_new_fd(raw, f_flags); else if (type == BPF_TYPE_LINK) ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw); else return -ENOENT; if (ret < 0) bpf_any_put(raw, type); return ret; } static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type) { struct bpf_prog *prog; int ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ); if (ret) return ERR_PTR(ret); if (inode->i_op == &bpf_map_iops) return ERR_PTR(-EINVAL); if (inode->i_op == &bpf_link_iops) return ERR_PTR(-EINVAL); if (inode->i_op != &bpf_prog_iops) return ERR_PTR(-EACCES); prog = inode->i_private; ret = security_bpf_prog(prog); if (ret < 0) return ERR_PTR(ret); if (!bpf_prog_get_ok(prog, &type, false)) return ERR_PTR(-EINVAL); bpf_prog_inc(prog); return prog; } struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) { struct bpf_prog *prog; struct path path; int ret = kern_path(name, LOOKUP_FOLLOW, &path); if (ret) return ERR_PTR(ret); prog = __get_prog_inode(d_backing_inode(path.dentry), type); if (!IS_ERR(prog)) touch_atime(&path); path_put(&path); return prog; } EXPORT_SYMBOL(bpf_prog_get_type_path); struct bpffs_btf_enums { const struct btf *btf; const struct btf_type *cmd_t; const struct btf_type *map_t; const struct btf_type *prog_t; const struct btf_type *attach_t; }; static int find_bpffs_btf_enums(struct bpffs_btf_enums *info) { const struct btf *btf; const struct btf_type *t; const char *name; int i, n; memset(info, 0, sizeof(*info)); btf = bpf_get_btf_vmlinux(); if (IS_ERR(btf)) return PTR_ERR(btf); if (!btf) return -ENOENT; info->btf = btf; for (i = 1, n = btf_nr_types(btf); i < n; i++) { t = btf_type_by_id(btf, i); if (!btf_type_is_enum(t)) continue; name = btf_name_by_offset(btf, t->name_off); if (!name) continue; if (strcmp(name, "bpf_cmd") == 0) info->cmd_t = t; else if (strcmp(name, "bpf_map_type") == 0) info->map_t = t; else if (strcmp(name, "bpf_prog_type") == 0) info->prog_t = t; else if (strcmp(name, "bpf_attach_type") == 0) info->attach_t = t; else continue; if (info->cmd_t && info->map_t && info->prog_t && info->attach_t) return 0; } return -ESRCH; } static bool find_btf_enum_const(const struct btf *btf, 
const struct btf_type *enum_t, const char *prefix, const char *str, int *value) { const struct btf_enum *e; const char *name; int i, n, pfx_len = strlen(prefix); *value = 0; if (!btf || !enum_t) return false; for (i = 0, n = btf_vlen(enum_t); i < n; i++) { e = &btf_enum(enum_t)[i]; name = btf_name_by_offset(btf, e->name_off); if (!name || strncasecmp(name, prefix, pfx_len) != 0) continue; /* match symbolic name case insensitive and ignoring prefix */ if (strcasecmp(name + pfx_len, str) == 0) { *value = e->val; return true; } } return false; } static void seq_print_delegate_opts(struct seq_file *m, const char *opt_name, const struct btf *btf, const struct btf_type *enum_t, const char *prefix, u64 delegate_msk, u64 any_msk) { const struct btf_enum *e; bool first = true; const char *name; u64 msk; int i, n, pfx_len = strlen(prefix); delegate_msk &= any_msk; /* clear unknown bits */ if (delegate_msk == 0) return; seq_printf(m, ",%s", opt_name); if (delegate_msk == any_msk) { seq_printf(m, "=any"); return; } if (btf && enum_t) { for (i = 0, n = btf_vlen(enum_t); i < n; i++) { e = &btf_enum(enum_t)[i]; name = btf_name_by_offset(btf, e->name_off); if (!name || strncasecmp(name, prefix, pfx_len) != 0) continue; msk = 1ULL << e->val; if (delegate_msk & msk) { /* emit lower-case name without prefix */ seq_putc(m, first ? '=' : ':'); name += pfx_len; while (*name) { seq_putc(m, tolower(*name)); name++; } delegate_msk &= ~msk; first = false; } } } if (delegate_msk) seq_printf(m, "%c0x%llx", first ? '=' : ':', delegate_msk); } /* * Display the mount options in /proc/mounts. */ static int bpf_show_options(struct seq_file *m, struct dentry *root) { struct inode *inode = d_inode(root); umode_t mode = inode->i_mode & S_IALLUGO & ~S_ISVTX; struct bpf_mount_opts *opts = root->d_sb->s_fs_info; u64 mask; if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, inode->i_uid)); if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, inode->i_gid)); if (mode != S_IRWXUGO) seq_printf(m, ",mode=%o", mode); if (opts->delegate_cmds || opts->delegate_maps || opts->delegate_progs || opts->delegate_attachs) { struct bpffs_btf_enums info; /* ignore errors, fallback to hex */ (void)find_bpffs_btf_enums(&info); mask = (1ULL << __MAX_BPF_CMD) - 1; seq_print_delegate_opts(m, "delegate_cmds", info.btf, info.cmd_t, "BPF_", opts->delegate_cmds, mask); mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1; seq_print_delegate_opts(m, "delegate_maps", info.btf, info.map_t, "BPF_MAP_TYPE_", opts->delegate_maps, mask); mask = (1ULL << __MAX_BPF_PROG_TYPE) - 1; seq_print_delegate_opts(m, "delegate_progs", info.btf, info.prog_t, "BPF_PROG_TYPE_", opts->delegate_progs, mask); mask = (1ULL << __MAX_BPF_ATTACH_TYPE) - 1; seq_print_delegate_opts(m, "delegate_attachs", info.btf, info.attach_t, "BPF_", opts->delegate_attachs, mask); } return 0; } static void bpf_free_inode(struct inode *inode) { enum bpf_type type; if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); if (!bpf_inode_type(inode, &type)) bpf_any_put(inode->i_private, type); free_inode_nonrcu(inode); } const struct super_operations bpf_super_ops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, .show_options = bpf_show_options, .free_inode = bpf_free_inode, }; enum { OPT_UID, OPT_GID, OPT_MODE, OPT_DELEGATE_CMDS, OPT_DELEGATE_MAPS, OPT_DELEGATE_PROGS, OPT_DELEGATE_ATTACHS, }; static const struct fs_parameter_spec bpf_fs_parameters[] = { fsparam_u32 ("uid", OPT_UID), fsparam_u32 ("gid", 
OPT_GID), fsparam_u32oct ("mode", OPT_MODE), fsparam_string ("delegate_cmds", OPT_DELEGATE_CMDS), fsparam_string ("delegate_maps", OPT_DELEGATE_MAPS), fsparam_string ("delegate_progs", OPT_DELEGATE_PROGS), fsparam_string ("delegate_attachs", OPT_DELEGATE_ATTACHS), {} }; static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct bpf_mount_opts *opts = fc->s_fs_info; struct fs_parse_result result; kuid_t uid; kgid_t gid; int opt, err; opt = fs_parse(fc, bpf_fs_parameters, param, &result); if (opt < 0) { /* We might like to report bad mount options here, but * traditionally we've ignored all mount options, so we'd * better continue to ignore non-existing options for bpf. */ if (opt == -ENOPARAM) { opt = vfs_parse_fs_param_source(fc, param); if (opt != -ENOPARAM) return opt; return 0; } if (opt < 0) return opt; } switch (opt) { case OPT_UID: uid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(uid)) goto bad_value; /* * The requested uid must be representable in the * filesystem's idmapping. */ if (!kuid_has_mapping(fc->user_ns, uid)) goto bad_value; opts->uid = uid; break; case OPT_GID: gid = make_kgid(current_user_ns(), result.uint_32); if (!gid_valid(gid)) goto bad_value; /* * The requested gid must be representable in the * filesystem's idmapping. */ if (!kgid_has_mapping(fc->user_ns, gid)) goto bad_value; opts->gid = gid; break; case OPT_MODE: opts->mode = result.uint_32 & S_IALLUGO; break; case OPT_DELEGATE_CMDS: case OPT_DELEGATE_MAPS: case OPT_DELEGATE_PROGS: case OPT_DELEGATE_ATTACHS: { struct bpffs_btf_enums info; const struct btf_type *enum_t; const char *enum_pfx; u64 *delegate_msk, msk = 0; char *p, *str; int val; /* ignore errors, fallback to hex */ (void)find_bpffs_btf_enums(&info); switch (opt) { case OPT_DELEGATE_CMDS: delegate_msk = &opts->delegate_cmds; enum_t = info.cmd_t; enum_pfx = "BPF_"; break; case OPT_DELEGATE_MAPS: delegate_msk = &opts->delegate_maps; enum_t = info.map_t; enum_pfx = "BPF_MAP_TYPE_"; break; case OPT_DELEGATE_PROGS: delegate_msk = &opts->delegate_progs; enum_t = info.prog_t; enum_pfx = "BPF_PROG_TYPE_"; break; case OPT_DELEGATE_ATTACHS: delegate_msk = &opts->delegate_attachs; enum_t = info.attach_t; enum_pfx = "BPF_"; break; default: return -EINVAL; } str = param->string; while ((p = strsep(&str, ":"))) { if (strcmp(p, "any") == 0) { msk |= ~0ULL; } else if (find_btf_enum_const(info.btf, enum_t, enum_pfx, p, &val)) { msk |= 1ULL << val; } else { err = kstrtou64(p, 0, &msk); if (err) return err; } } /* Setting delegation mount options requires privileges */ if (msk && !capable(CAP_SYS_ADMIN)) return -EPERM; *delegate_msk |= msk; break; } default: /* ignore unknown mount options */ break; } return 0; bad_value: return invalfc(fc, "Bad value for '%s'", param->key); } struct bpf_preload_ops *bpf_preload_ops; EXPORT_SYMBOL_GPL(bpf_preload_ops); static bool bpf_preload_mod_get(void) { /* If bpf_preload.ko wasn't loaded earlier then load it now. * When bpf_preload is built into vmlinux the module's __init * function will populate it. */ if (!bpf_preload_ops) { request_module("bpf_preload"); if (!bpf_preload_ops) return false; } /* And grab the reference, so the module doesn't disappear while the * kernel is interacting with the kernel module and its UMD. 
*/ if (!try_module_get(bpf_preload_ops->owner)) { pr_err("bpf_preload module get failed.\n"); return false; } return true; } static void bpf_preload_mod_put(void) { if (bpf_preload_ops) /* now user can "rmmod bpf_preload" if necessary */ module_put(bpf_preload_ops->owner); } static DEFINE_MUTEX(bpf_preload_lock); static int populate_bpffs(struct dentry *parent) { struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {}; int err = 0, i; /* grab the mutex to make sure the kernel interactions with bpf_preload * are serialized */ mutex_lock(&bpf_preload_lock); /* if bpf_preload.ko wasn't built into vmlinux then load it */ if (!bpf_preload_mod_get()) goto out; err = bpf_preload_ops->preload(objs); if (err) goto out_put; for (i = 0; i < BPF_PRELOAD_LINKS; i++) { bpf_link_inc(objs[i].link); err = bpf_iter_link_pin_kernel(parent, objs[i].link_name, objs[i].link); if (err) { bpf_link_put(objs[i].link); goto out_put; } } out_put: bpf_preload_mod_put(); out: mutex_unlock(&bpf_preload_lock); return err; } static int bpf_fill_super(struct super_block *sb, struct fs_context *fc) { static const struct tree_descr bpf_rfiles[] = { { "" } }; struct bpf_mount_opts *opts = sb->s_fs_info; struct inode *inode; int ret; /* Mounting an instance of BPF FS requires privileges */ if (fc->user_ns != &init_user_ns && !capable(CAP_SYS_ADMIN)) return -EPERM; ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles); if (ret) return ret; sb->s_op = &bpf_super_ops; inode = sb->s_root->d_inode; inode->i_uid = opts->uid; inode->i_gid = opts->gid; inode->i_op = &bpf_dir_iops; inode->i_mode &= ~S_IALLUGO; populate_bpffs(sb->s_root); inode->i_mode |= S_ISVTX | opts->mode; return 0; } static int bpf_get_tree(struct fs_context *fc) { return get_tree_nodev(fc, bpf_fill_super); } static void bpf_free_fc(struct fs_context *fc) { kfree(fc->s_fs_info); } static const struct fs_context_operations bpf_context_ops = { .free = bpf_free_fc, .parse_param = bpf_parse_param, .get_tree = bpf_get_tree, }; /* * Set up the filesystem mount context. */ static int bpf_init_fs_context(struct fs_context *fc) { struct bpf_mount_opts *opts; opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL); if (!opts) return -ENOMEM; opts->mode = S_IRWXUGO; opts->uid = current_fsuid(); opts->gid = current_fsgid(); /* start out with no BPF token delegation enabled */ opts->delegate_cmds = 0; opts->delegate_maps = 0; opts->delegate_progs = 0; opts->delegate_attachs = 0; fc->s_fs_info = opts; fc->ops = &bpf_context_ops; return 0; } static void bpf_kill_super(struct super_block *sb) { struct bpf_mount_opts *opts = sb->s_fs_info; kill_litter_super(sb); kfree(opts); } static struct file_system_type bpf_fs_type = { .owner = THIS_MODULE, .name = "bpf", .init_fs_context = bpf_init_fs_context, .parameters = bpf_fs_parameters, .kill_sb = bpf_kill_super, .fs_flags = FS_USERNS_MOUNT, }; static int __init bpf_init(void) { int ret; ret = sysfs_create_mount_point(fs_kobj, "bpf"); if (ret) return ret; ret = register_filesystem(&bpf_fs_type); if (ret) sysfs_remove_mount_point(fs_kobj, "bpf"); return ret; } fs_initcall(bpf_init); |
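/*
 * Editor's note: bpffs exists so that bpf(2)'s BPF_OBJ_PIN and BPF_OBJ_GET
 * commands have somewhere to create and look up objects.  The user-space
 * sketch below pins an existing map fd and re-opens it by path; the path
 * and helper names are assumptions, and bpffs is assumed to be mounted at
 * /sys/fs/bpf.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

static int pin_and_reopen(int map_fd, const char *path)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	attr.bpf_fd = map_fd;
	if (sys_bpf(BPF_OBJ_PIN, &attr))	/* creates the bpffs inode */
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	return sys_bpf(BPF_OBJ_GET, &attr);	/* new fd for the pinned map */
}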
| 6 7 7 3 2 7 7 7 7 7 6 7 7 7 6 7 7 7 7 3 7 7 2 1 3 6 1 1 7 7 3 1 3 3 3 2 3 3 3 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 | // SPDX-License-Identifier: GPL-2.0-or-later /* procfs files for key database enumeration * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/init.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/errno.h> #include "internal.h" static void *proc_keys_start(struct seq_file *p, loff_t *_pos); static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos); static void proc_keys_stop(struct seq_file *p, void *v); static int proc_keys_show(struct seq_file *m, void *v); static const struct seq_operations proc_keys_ops = { .start = proc_keys_start, .next = proc_keys_next, .stop = proc_keys_stop, .show = proc_keys_show, }; static void *proc_key_users_start(struct seq_file *p, loff_t *_pos); static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos); static void proc_key_users_stop(struct seq_file *p, void *v); static int proc_key_users_show(struct seq_file *m, void *v); static const struct seq_operations proc_key_users_ops = { .start = proc_key_users_start, .next = proc_key_users_next, .stop = proc_key_users_stop, .show = proc_key_users_show, }; /* * Declare the /proc files. */ static int __init key_proc_init(void) { struct proc_dir_entry *p; p = proc_create_seq("keys", 0, NULL, &proc_keys_ops); if (!p) panic("Cannot create /proc/keys\n"); p = proc_create_seq("key-users", 0, NULL, &proc_key_users_ops); if (!p) panic("Cannot create /proc/key-users\n"); return 0; } __initcall(key_proc_init); /* * Implement "/proc/keys" to provide a list of the keys on the system that * grant View permission to the caller. 
*/ static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n) { struct user_namespace *user_ns = seq_user_ns(p); n = rb_next(n); while (n) { struct key *key = rb_entry(n, struct key, serial_node); if (kuid_has_mapping(user_ns, key->user->uid)) break; n = rb_next(n); } return n; } static struct key *find_ge_key(struct seq_file *p, key_serial_t id) { struct user_namespace *user_ns = seq_user_ns(p); struct rb_node *n = key_serial_tree.rb_node; struct key *minkey = NULL; while (n) { struct key *key = rb_entry(n, struct key, serial_node); if (id < key->serial) { if (!minkey || minkey->serial > key->serial) minkey = key; n = n->rb_left; } else if (id > key->serial) { n = n->rb_right; } else { minkey = key; break; } key = NULL; } if (!minkey) return NULL; for (;;) { if (kuid_has_mapping(user_ns, minkey->user->uid)) return minkey; n = rb_next(&minkey->serial_node); if (!n) return NULL; minkey = rb_entry(n, struct key, serial_node); } } static void *proc_keys_start(struct seq_file *p, loff_t *_pos) __acquires(key_serial_lock) { key_serial_t pos = *_pos; struct key *key; spin_lock(&key_serial_lock); if (*_pos > INT_MAX) return NULL; key = find_ge_key(p, pos); if (!key) return NULL; *_pos = key->serial; return &key->serial_node; } static inline key_serial_t key_node_serial(struct rb_node *n) { struct key *key = rb_entry(n, struct key, serial_node); return key->serial; } static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos) { struct rb_node *n; n = key_serial_next(p, v); if (n) *_pos = key_node_serial(n); else (*_pos)++; return n; } static void proc_keys_stop(struct seq_file *p, void *v) __releases(key_serial_lock) { spin_unlock(&key_serial_lock); } static int proc_keys_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); unsigned long flags; key_ref_t key_ref, skey_ref; time64_t now, expiry; char xbuf[16]; short state; u64 timo; int rc; struct keyring_search_context ctx = { .index_key = key->index_key, .cred = m->file->f_cred, .match_data.cmp = lookup_user_key_possessed, .match_data.raw_data = key, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_RECURSE), }; key_ref = make_key_ref(key, 0); /* determine if the key is possessed by this process (a test we can * skip if the key does not indicate the possessor can view it */ if (key->perm & KEY_POS_VIEW) { rcu_read_lock(); skey_ref = search_cred_keyrings_rcu(&ctx); rcu_read_unlock(); if (!IS_ERR(skey_ref)) { key_ref_put(skey_ref); key_ref = make_key_ref(key, 1); } } /* check whether the current task is allowed to view the key */ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); if (rc < 0) return 0; now = ktime_get_real_seconds(); rcu_read_lock(); /* come up with a suitable timeout value */ expiry = READ_ONCE(key->expiry); if (expiry == TIME64_MAX) { memcpy(xbuf, "perm", 5); } else if (now >= expiry) { memcpy(xbuf, "expd", 5); } else { timo = expiry - now; if (timo < 60) sprintf(xbuf, "%llus", timo); else if (timo < 60*60) sprintf(xbuf, "%llum", div_u64(timo, 60)); else if (timo < 60*60*24) sprintf(xbuf, "%lluh", div_u64(timo, 60 * 60)); else if (timo < 60*60*24*7) sprintf(xbuf, "%llud", div_u64(timo, 60 * 60 * 24)); else sprintf(xbuf, "%lluw", div_u64(timo, 60 * 60 * 24 * 7)); } state = key_read_state(key); #define showflag(FLAGS, LETTER, FLAG) \ ((FLAGS & (1 << FLAG)) ? 
LETTER : '-') flags = READ_ONCE(key->flags); seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", key->serial, state != KEY_IS_UNINSTANTIATED ? 'I' : '-', showflag(flags, 'R', KEY_FLAG_REVOKED), showflag(flags, 'D', KEY_FLAG_DEAD), showflag(flags, 'Q', KEY_FLAG_IN_QUOTA), showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT), state < 0 ? 'N' : '-', showflag(flags, 'i', KEY_FLAG_INVALIDATED), refcount_read(&key->usage), xbuf, key->perm, from_kuid_munged(seq_user_ns(m), key->uid), from_kgid_munged(seq_user_ns(m), key->gid), key->type->name); #undef showflag if (key->type->describe) key->type->describe(key, m); seq_putc(m, '\n'); rcu_read_unlock(); return 0; } static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n) { while (n) { struct key_user *user = rb_entry(n, struct key_user, node); if (kuid_has_mapping(user_ns, user->uid)) break; n = rb_next(n); } return n; } static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n) { return __key_user_next(user_ns, rb_next(n)); } static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r) { struct rb_node *n = rb_first(r); return __key_user_next(user_ns, n); } static void *proc_key_users_start(struct seq_file *p, loff_t *_pos) __acquires(key_user_lock) { struct rb_node *_p; loff_t pos = *_pos; spin_lock(&key_user_lock); _p = key_user_first(seq_user_ns(p), &key_user_tree); while (pos > 0 && _p) { pos--; _p = key_user_next(seq_user_ns(p), _p); } return _p; } static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos) { (*_pos)++; return key_user_next(seq_user_ns(p), (struct rb_node *)v); } static void proc_key_users_stop(struct seq_file *p, void *v) __releases(key_user_lock) { spin_unlock(&key_user_lock); } static int proc_key_users_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key_user *user = rb_entry(_p, struct key_user, node); unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n", from_kuid_munged(seq_user_ns(m), user->uid), refcount_read(&user->usage), atomic_read(&user->nkeys), atomic_read(&user->nikeys), user->qnkeys, maxkeys, user->qnbytes, maxbytes); return 0; } |
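/*
 * Editor's illustration (userspace sketch, not part of the kernel sources):
 * proc_keys_show() above emits one record per visible key via
 * seq_printf("%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ...") --
 * serial, seven flag letters, usage count, timeout bucket, permissions
 * mask, uid, gid and key type, followed by the type-specific description.
 * The sscanf()-based parsing below is an assumption used only to show
 * that column layout; it is not how any kernel or keyutils tool does it.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/keys", "r");
	char line[512];

	if (!f) {
		perror("fopen /proc/keys");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		unsigned int serial, perm;
		char flags[8], timo[16], type[32];
		int usage, uid, gid;

		/* mirrors "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s" */
		if (sscanf(line, "%8x %7s %d %15s %8x %d %d %31s",
			   &serial, flags, &usage, timo, &perm,
			   &uid, &gid, type) == 8)
			printf("key %08x type=%-10s timeout=%s flags=%s\n",
			       serial, type, timo, flags);
	}

	fclose(f);
	return 0;
}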
| 41 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM vsock #if !defined(_TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H) || \ defined(TRACE_HEADER_MULTI_READ) #define _TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H #include <linux/tracepoint.h> TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_SEQPACKET); #define show_type(val) \ __print_symbolic(val, \ { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }, \ { VIRTIO_VSOCK_TYPE_SEQPACKET, "SEQPACKET" }) TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RESPONSE); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RST); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_SHUTDOWN); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RW); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_CREDIT_UPDATE); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_CREDIT_REQUEST); #define show_op(val) \ __print_symbolic(val, \ { VIRTIO_VSOCK_OP_INVALID, "INVALID" }, \ { VIRTIO_VSOCK_OP_REQUEST, "REQUEST" }, \ { VIRTIO_VSOCK_OP_RESPONSE, "RESPONSE" }, \ { VIRTIO_VSOCK_OP_RST, "RST" }, \ { VIRTIO_VSOCK_OP_SHUTDOWN, "SHUTDOWN" }, \ { VIRTIO_VSOCK_OP_RW, "RW" }, \ { VIRTIO_VSOCK_OP_CREDIT_UPDATE, "CREDIT_UPDATE" }, \ { VIRTIO_VSOCK_OP_CREDIT_REQUEST, "CREDIT_REQUEST" }) TRACE_EVENT(virtio_transport_alloc_pkt, TP_PROTO( __u32 src_cid, __u32 src_port, __u32 dst_cid, __u32 dst_port, __u32 len, __u16 type, __u16 op, __u32 flags, bool zcopy ), TP_ARGS( src_cid, src_port, dst_cid, dst_port, len, type, op, flags, zcopy ), TP_STRUCT__entry( __field(__u32, src_cid) __field(__u32, src_port) __field(__u32, dst_cid) __field(__u32, dst_port) __field(__u32, len) __field(__u16, type) __field(__u16, op) __field(__u32, flags) __field(bool, zcopy) ), TP_fast_assign( __entry->src_cid = src_cid; __entry->src_port = src_port; __entry->dst_cid = dst_cid; __entry->dst_port = dst_port; __entry->len = len; __entry->type = type; __entry->op = op; __entry->flags = flags; __entry->zcopy = zcopy; ), TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s", __entry->src_cid, __entry->src_port, __entry->dst_cid, __entry->dst_port, __entry->len, show_type(__entry->type), show_op(__entry->op), __entry->flags, __entry->zcopy ? 
"true" : "false") ); TRACE_EVENT(virtio_transport_recv_pkt, TP_PROTO( __u32 src_cid, __u32 src_port, __u32 dst_cid, __u32 dst_port, __u32 len, __u16 type, __u16 op, __u32 flags, __u32 buf_alloc, __u32 fwd_cnt ), TP_ARGS( src_cid, src_port, dst_cid, dst_port, len, type, op, flags, buf_alloc, fwd_cnt ), TP_STRUCT__entry( __field(__u32, src_cid) __field(__u32, src_port) __field(__u32, dst_cid) __field(__u32, dst_port) __field(__u32, len) __field(__u16, type) __field(__u16, op) __field(__u32, flags) __field(__u32, buf_alloc) __field(__u32, fwd_cnt) ), TP_fast_assign( __entry->src_cid = src_cid; __entry->src_port = src_port; __entry->dst_cid = dst_cid; __entry->dst_port = dst_port; __entry->len = len; __entry->type = type; __entry->op = op; __entry->flags = flags; __entry->buf_alloc = buf_alloc; __entry->fwd_cnt = fwd_cnt; ), TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x " "buf_alloc=%u fwd_cnt=%u", __entry->src_cid, __entry->src_port, __entry->dst_cid, __entry->dst_port, __entry->len, show_type(__entry->type), show_op(__entry->op), __entry->flags, __entry->buf_alloc, __entry->fwd_cnt) ); #endif /* _TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H */ #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE vsock_virtio_transport_common /* This part must be outside protection */ #include <trace/define_trace.h> |
912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2008, Jouni Malinen <j@w1.fi> * Copyright (C) 2016-2017 Intel Deutschland GmbH * Copyright (C) 2020-2023 Intel Corporation */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/compiler.h> #include <linux/ieee80211.h> #include <linux/gfp.h> #include <linux/unaligned.h> #include <net/mac80211.h> #include <crypto/aes.h> #include <crypto/utils.h> #include "ieee80211_i.h" #include "michael.h" #include "tkip.h" #include "aes_ccm.h" #include "aes_cmac.h" #include "aes_gmac.h" #include "aes_gcm.h" #include "wpa.h" ieee80211_tx_result ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) { u8 *data, *key, *mic; size_t data_len; unsigned int hdrlen; struct ieee80211_hdr *hdr; struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int tail; hdr = (struct ieee80211_hdr *)skb->data; if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control)) return TX_CONTINUE; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen) return TX_DROP; data = skb->data + hdrlen; data_len = skb->len - hdrlen; if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) { /* Need to use software crypto for the test */ info->control.hw_key = NULL; } if (info->control.hw_key && (info->flags & IEEE80211_TX_CTL_DONTFRAG || ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) && !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) { /* hwaccel - with no need for SW-generated MMIC or MIC space */ return TX_CONTINUE; } tail = MICHAEL_MIC_LEN; if (!info->control.hw_key) tail += IEEE80211_TKIP_ICV_LEN; if (WARN(skb_tailroom(skb) < tail || skb_headroom(skb) < IEEE80211_TKIP_IV_LEN, "mmic: not enough head/tail (%d/%d,%d/%d)\n", skb_headroom(skb), IEEE80211_TKIP_IV_LEN, skb_tailroom(skb), tail)) return TX_DROP; mic = skb_put(skb, MICHAEL_MIC_LEN); if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) { /* Zeroed MIC can help with debug */ memset(mic, 0, MICHAEL_MIC_LEN); return TX_CONTINUE; } key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) mic[0]++; return TX_CONTINUE; } ieee80211_rx_result ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) { u8 *data, *key = 
NULL; size_t data_len; unsigned int hdrlen; u8 mic[MICHAEL_MIC_LEN]; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; /* * it makes no sense to check for MIC errors on anything other * than data frames. */ if (!ieee80211_is_data_present(hdr->frame_control)) return RX_CONTINUE; /* * No way to verify the MIC if the hardware stripped it or * the IV with the key index. In this case we have solely rely * on the driver to set RX_FLAG_MMIC_ERROR in the event of a * MIC failure report. */ if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) { if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail_no_key; if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP) goto update_iv; return RX_CONTINUE; } /* * Some hardware seems to generate Michael MIC failure reports; even * though, the frame was not encrypted with TKIP and therefore has no * MIC. Ignore the flag them to avoid triggering countermeasures. */ if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || !(status->flag & RX_FLAG_DECRYPTED)) return RX_CONTINUE; if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) { /* * APs with pairwise keys should never receive Michael MIC * errors for non-zero keyidx because these are reserved for * group keys and only the AP is sending real multicast * frames in the BSS. */ return RX_DROP_U_AP_RX_GROUPCAST; } if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen + MICHAEL_MIC_LEN) return RX_DROP_U_SHORT_MMIC; if (skb_linearize(rx->skb)) return RX_DROP_U_OOM; hdr = (void *)skb->data; data = skb->data + hdrlen; data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN)) goto mic_fail; /* remove Michael MIC from payload */ skb_trim(skb, skb->len - MICHAEL_MIC_LEN); update_iv: /* update IV in key information to be able to detect replays */ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32; rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16; return RX_CONTINUE; mic_fail: rx->key->u.tkip.mic_failures++; mic_fail_no_key: /* * In some cases the key can be unset - e.g. a multicast packet, in * a driver that supports HW encryption. Send up the key idx only if * the key is set. */ cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2, is_multicast_ether_addr(hdr->addr1) ? NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE, rx->key ? 
rx->key->conf.keyidx : -1, NULL, GFP_ATOMIC); return RX_DROP_U_MMIC_FAIL; } static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int hdrlen; int len, tail; u64 pn; u8 *pos; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { /* hwaccel - with no need for software-generated IV */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = IEEE80211_TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) return -1; pos = skb_push(skb, IEEE80211_TKIP_IV_LEN); memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen); pos += hdrlen; /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; /* Increase IV for the frame */ pn = atomic64_inc_return(&key->conf.tx_pn); pos = ieee80211_tkip_add_iv(pos, &key->conf, pn); /* hwaccel - with software IV */ if (info->control.hw_key) return 0; /* Add room for ICV */ skb_put(skb, IEEE80211_TKIP_ICV_LEN); return ieee80211_tkip_encrypt_data(&tx->local->wep_tx_ctx, key, skb, pos, len); } ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (tkip_encrypt_skb(tx, skb) < 0) return TX_DROP; } return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; int hdrlen, res, hwaccel = 0; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control)) return RX_CONTINUE; if (!rx->sta || skb->len - hdrlen < 12) return RX_DROP_U_SHORT_TKIP; /* it may be possible to optimize this a bit more */ if (skb_linearize(rx->skb)) return RX_DROP_U_OOM; hdr = (void *)skb->data; /* * Let TKIP code verify IV, but skip decryption. * In the case where hardware checks the IV as well, * we don't even get here, see ieee80211_rx_h_decrypt() */ if (status->flag & RX_FLAG_DECRYPTED) hwaccel = 1; res = ieee80211_tkip_decrypt_data(&rx->local->wep_rx_ctx, key, skb->data + hdrlen, skb->len - hdrlen, rx->sta->sta.addr, hdr->addr1, hwaccel, rx->security_idx, &rx->tkip.iv32, &rx->tkip.iv16); if (res != TKIP_DECRYPT_OK) return RX_DROP_U_TKIP_FAIL; /* Trim ICV */ if (!(status->flag & RX_FLAG_ICV_STRIPPED)) skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); /* Remove IV */ memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_TKIP_IV_LEN); return RX_CONTINUE; } /* * Calculate AAD for CCMP/GCMP, returning qos_tid since we * need that in CCMP also for b_0. 
*/ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad, bool spp_amsdu) { struct ieee80211_hdr *hdr = (void *)skb->data; __le16 mask_fc; int a4_included, mgmt; u8 qos_tid; u16 len_a = 22; /* * Mask FC: zero subtype b4 b5 b6 (if not mgmt) * Retry, PwrMgt, MoreData, Order (if Qos Data); set Protected */ mgmt = ieee80211_is_mgmt(hdr->frame_control); mask_fc = hdr->frame_control; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); if (!mgmt) mask_fc &= ~cpu_to_le16(0x0070); mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); a4_included = ieee80211_has_a4(hdr->frame_control); if (a4_included) len_a += 6; if (ieee80211_is_data_qos(hdr->frame_control)) { qos_tid = *ieee80211_get_qos_ctl(hdr); if (spp_amsdu) qos_tid &= IEEE80211_QOS_CTL_TID_MASK | IEEE80211_QOS_CTL_A_MSDU_PRESENT; else qos_tid &= IEEE80211_QOS_CTL_TID_MASK; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_ORDER); len_a += 2; } else { qos_tid = 0; } /* AAD (extra authenticate-only data) / masked 802.11 header * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ put_unaligned_be16(len_a, &aad[0]); put_unaligned(mask_fc, (__le16 *)&aad[2]); memcpy(&aad[4], &hdr->addrs, 3 * ETH_ALEN); /* Mask Seq#, leave Frag# */ aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; aad[23] = 0; if (a4_included) { memcpy(&aad[24], hdr->addr4, ETH_ALEN); aad[30] = qos_tid; aad[31] = 0; } else { memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); aad[24] = qos_tid; } return qos_tid; } static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad, bool spp_amsdu) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; u8 qos_tid = ccmp_gcmp_aad(skb, aad, spp_amsdu); /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC * mode authentication are not allowed to collide, yet both are derived * from this vector b_0. We only set L := 1 here to indicate that the * data size can be represented in (L+1) bytes. The CCM layer will take * care of storing the data length in the top (L+1) bytes and setting * and clearing the other bits as is required to derive the two IVs. 
*/ b_0[0] = 0x1; /* Nonce: Nonce Flags | A2 | PN * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7) */ b_0[1] = qos_tid | (ieee80211_is_mgmt(hdr->frame_control) << 4); memcpy(&b_0[2], hdr->addr2, ETH_ALEN); memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN); } static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id) { hdr[0] = pn[5]; hdr[1] = pn[4]; hdr[2] = 0; hdr[3] = 0x20 | (key_id << 6); hdr[4] = pn[3]; hdr[5] = pn[2]; hdr[6] = pn[1]; hdr[7] = pn[0]; } static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr) { pn[0] = hdr[7]; pn[1] = hdr[6]; pn[2] = hdr[5]; pn[3] = hdr[4]; pn[4] = hdr[1]; pn[5] = hdr[0]; } static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, unsigned int mic_len) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen, len, tail; u8 *pos; u8 pn[6]; u64 pn64; u8 aad[CCM_AAD_LEN]; u8 b_0[AES_BLOCK_SIZE]; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && !((info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) && ieee80211_is_mgmt(hdr->frame_control))) { /* * hwaccel has no need for preallocated room for CCMP * header or MIC fields */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = mic_len; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN)) return -1; pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN); memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; pos += hdrlen; pn64 = atomic64_inc_return(&key->conf.tx_pn); pn[5] = pn64; pn[4] = pn64 >> 8; pn[3] = pn64 >> 16; pn[2] = pn64 >> 24; pn[1] = pn64 >> 32; pn[0] = pn64 >> 40; ccmp_pn2hdr(pos, pn, key->conf.keyidx); /* hwaccel - with software CCMP header */ if (info->control.hw_key) return 0; pos += IEEE80211_CCMP_HDR_LEN; ccmp_special_blocks(skb, pn, b_0, aad, key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU); return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len, skb_put(skb, mic_len)); } ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx, unsigned int mic_len) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (ccmp_encrypt_skb(tx, skb, mic_len) < 0) return TX_DROP; } return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, unsigned int mic_len) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; int hdrlen; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[IEEE80211_CCMP_PN_LEN]; int data_len; int queue; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control) && !ieee80211_is_robust_mgmt_frame(skb)) return RX_CONTINUE; if (status->flag & RX_FLAG_DECRYPTED) { if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN)) return RX_DROP_U_SHORT_CCMP; if (status->flag & RX_FLAG_MIC_STRIPPED) mic_len = 0; } else { if (skb_linearize(rx->skb)) return RX_DROP_U_OOM; } /* reload hdr - skb might have been reallocated */ hdr = (void *)rx->skb->data; data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len; if (!rx->sta || 
data_len < 0) return RX_DROP_U_SHORT_CCMP; if (!(status->flag & RX_FLAG_PN_VALIDATED)) { int res; ccmp_hdr2pn(pn, skb->data + hdrlen); queue = rx->security_idx; res = memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN); if (res < 0 || (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) { key->u.ccmp.replays++; return RX_DROP_U_REPLAY; } if (!(status->flag & RX_FLAG_DECRYPTED)) { u8 aad[2 * AES_BLOCK_SIZE]; u8 b_0[AES_BLOCK_SIZE]; /* hardware didn't decrypt/verify MIC */ ccmp_special_blocks(skb, pn, b_0, aad, key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU); if (ieee80211_aes_ccm_decrypt( key->u.ccmp.tfm, b_0, aad, skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN, data_len, skb->data + skb->len - mic_len)) return RX_DROP_U_MIC_FAIL; } memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); if (unlikely(ieee80211_is_frag(hdr))) memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); } /* Remove CCMP header and MIC */ if (pskb_trim(skb, skb->len - mic_len)) return RX_DROP_U_SHORT_CCMP_MIC; memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_CCMP_HDR_LEN); return RX_CONTINUE; } static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad, bool spp_amsdu) { struct ieee80211_hdr *hdr = (void *)skb->data; memcpy(j_0, hdr->addr2, ETH_ALEN); memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN); ccmp_gcmp_aad(skb, aad, spp_amsdu); } static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id) { hdr[0] = pn[5]; hdr[1] = pn[4]; hdr[2] = 0; hdr[3] = 0x20 | (key_id << 6); hdr[4] = pn[3]; hdr[5] = pn[2]; hdr[6] = pn[1]; hdr[7] = pn[0]; } static inline void gcmp_hdr2pn(u8 *pn, const u8 *hdr) { pn[0] = hdr[7]; pn[1] = hdr[6]; pn[2] = hdr[5]; pn[3] = hdr[4]; pn[4] = hdr[1]; pn[5] = hdr[0]; } static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen, len, tail; u8 *pos; u8 pn[6]; u64 pn64; u8 aad[GCM_AAD_LEN]; u8 j_0[AES_BLOCK_SIZE]; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && !((info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) && ieee80211_is_mgmt(hdr->frame_control))) { /* hwaccel has no need for preallocated room for GCMP * header or MIC fields */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = IEEE80211_GCMP_MIC_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN)) return -1; pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN); memmove(pos, pos + IEEE80211_GCMP_HDR_LEN, hdrlen); skb_set_network_header(skb, skb_network_offset(skb) + IEEE80211_GCMP_HDR_LEN); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; pos += hdrlen; pn64 = atomic64_inc_return(&key->conf.tx_pn); pn[5] = pn64; pn[4] = pn64 >> 8; pn[3] = pn64 >> 16; pn[2] = pn64 >> 24; pn[1] = pn64 >> 32; pn[0] = pn64 >> 40; gcmp_pn2hdr(pos, pn, key->conf.keyidx); /* hwaccel - with software GCMP header */ if (info->control.hw_key) return 0; pos += IEEE80211_GCMP_HDR_LEN; gcmp_special_blocks(skb, pn, j_0, aad, key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU); return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len, skb_put(skb, 
IEEE80211_GCMP_MIC_LEN)); } ieee80211_tx_result ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (gcmp_encrypt_skb(tx, skb) < 0) return TX_DROP; } return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; int hdrlen; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[IEEE80211_GCMP_PN_LEN]; int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control) && !ieee80211_is_robust_mgmt_frame(skb)) return RX_CONTINUE; if (status->flag & RX_FLAG_DECRYPTED) { if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN)) return RX_DROP_U_SHORT_GCMP; if (status->flag & RX_FLAG_MIC_STRIPPED) mic_len = 0; } else { if (skb_linearize(rx->skb)) return RX_DROP_U_OOM; } /* reload hdr - skb might have been reallocated */ hdr = (void *)rx->skb->data; data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len; if (!rx->sta || data_len < 0) return RX_DROP_U_SHORT_GCMP; if (!(status->flag & RX_FLAG_PN_VALIDATED)) { int res; gcmp_hdr2pn(pn, skb->data + hdrlen); queue = rx->security_idx; res = memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN); if (res < 0 || (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) { key->u.gcmp.replays++; return RX_DROP_U_REPLAY; } if (!(status->flag & RX_FLAG_DECRYPTED)) { u8 aad[2 * AES_BLOCK_SIZE]; u8 j_0[AES_BLOCK_SIZE]; /* hardware didn't decrypt/verify MIC */ gcmp_special_blocks(skb, pn, j_0, aad, key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU); if (ieee80211_aes_gcm_decrypt( key->u.gcmp.tfm, j_0, aad, skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN, data_len, skb->data + skb->len - IEEE80211_GCMP_MIC_LEN)) return RX_DROP_U_MIC_FAIL; } memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN); if (unlikely(ieee80211_is_frag(hdr))) memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); } /* Remove GCMP header and MIC */ if (pskb_trim(skb, skb->len - mic_len)) return RX_DROP_U_SHORT_GCMP_MIC; memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_GCMP_HDR_LEN); return RX_CONTINUE; } static void bip_aad(struct sk_buff *skb, u8 *aad) { __le16 mask_fc; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; /* BIP AAD: FC(masked) || A1 || A2 || A3 */ /* FC type/subtype */ /* Mask FC Retry, PwrMgt, MoreData flags to zero */ mask_fc = hdr->frame_control; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); put_unaligned(mask_fc, (__le16 *) &aad[0]); /* A1 || A2 || A3 */ memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN); } static inline void bip_ipn_set64(u8 *d, u64 pn) { *d++ = pn; *d++ = pn >> 8; *d++ = pn >> 16; *d++ = pn >> 24; *d++ = pn >> 32; *d = pn >> 40; } static inline void bip_ipn_swap(u8 *d, const u8 *s) { *d++ = s[5]; *d++ = s[4]; *d++ = s[3]; *d++ = s[2]; *d++ = s[1]; *d = s[0]; } ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_key *key = tx->key; struct ieee80211_mmie *mmie; u8 aad[20]; u64 pn64; if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) return TX_DROP; skb = skb_peek(&tx->skbs); info = IEEE80211_SKB_CB(skb); if (info->control.hw_key && !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE)) return 
TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn64 = atomic64_inc_return(&key->conf.tx_pn); bip_ipn_set64(mmie->sequence_number, pn64); if (info->control.hw_key) return TX_CONTINUE; bip_aad(skb, aad); /* * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */ ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mmie->mic); return TX_CONTINUE; } ieee80211_tx_result ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_key *key = tx->key; struct ieee80211_mmie_16 *mmie; u8 aad[20]; u64 pn64; if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) return TX_DROP; skb = skb_peek(&tx->skbs); info = IEEE80211_SKB_CB(skb); if (info->control.hw_key && !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE)) return TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn64 = atomic64_inc_return(&key->conf.tx_pn); bip_ipn_set64(mmie->sequence_number, pn64); if (info->control.hw_key) return TX_CONTINUE; bip_aad(skb, aad); /* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */ ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mmie->mic); return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie *mmie; u8 aad[20], mic[8], ipn[6]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; /* management frames are already linear */ if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_U_SHORT_CMAC; mmie = (struct ieee80211_mmie *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) return RX_DROP_U_BAD_MMIE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { key->u.aes_cmac.replays++; return RX_DROP_U_REPLAY; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ bip_aad(skb, aad); ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_cmac.icverrors++; return RX_DROP_U_MIC_FAIL; } } memcpy(key->u.aes_cmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie_16 *mmie; u8 aad[20], mic[16], ipn[6]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; /* management frames are already linear */ if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_U_SHORT_CMAC256; mmie = (struct ieee80211_mmie_16 *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) 
return RX_DROP_U_BAD_MMIE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { key->u.aes_cmac.replays++; return RX_DROP_U_REPLAY; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ bip_aad(skb, aad); ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_cmac.icverrors++; return RX_DROP_U_MIC_FAIL; } } memcpy(key->u.aes_cmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; } ieee80211_tx_result ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_key *key = tx->key; struct ieee80211_mmie_16 *mmie; struct ieee80211_hdr *hdr; u8 aad[GMAC_AAD_LEN]; u64 pn64; u8 nonce[GMAC_NONCE_LEN]; if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) return TX_DROP; skb = skb_peek(&tx->skbs); info = IEEE80211_SKB_CB(skb); if (info->control.hw_key && !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIE)) return TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn64 = atomic64_inc_return(&key->conf.tx_pn); bip_ipn_set64(mmie->sequence_number, pn64); if (info->control.hw_key) return TX_CONTINUE; bip_aad(skb, aad); hdr = (struct ieee80211_hdr *)skb->data; memcpy(nonce, hdr->addr2, ETH_ALEN); bip_ipn_swap(nonce + ETH_ALEN, mmie->sequence_number); /* MIC = AES-GMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */ if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mmie->mic) < 0) return TX_DROP; return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie_16 *mmie; u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; /* management frames are already linear */ if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_U_SHORT_GMAC; mmie = (struct ieee80211_mmie_16 *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) return RX_DROP_U_BAD_MMIE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_gmac.rx_pn, 6) <= 0) { key->u.aes_gmac.replays++; return RX_DROP_U_REPLAY; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ bip_aad(skb, aad); memcpy(nonce, hdr->addr2, ETH_ALEN); memcpy(nonce + ETH_ALEN, ipn, 6); mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC); if (!mic) return RX_DROP_U_OOM; if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mic) < 0 || crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_gmac.icverrors++; kfree(mic); return RX_DROP_U_MIC_FAIL; } kfree(mic); } memcpy(key->u.aes_gmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; } |
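/*
 * Editor's illustration (standalone userspace sketch, not part of mac80211):
 * a round-trip of the CCMP header packing used above.  ccmp_pn2hdr() stores
 * the 48-bit packet number with PN0/PN1 (least significant bytes) first,
 * then a reserved byte, then the ExtIV flag (0x20) OR'ed with the key index
 * in bits 6-7, then PN2..PN5; ccmp_hdr2pn() reverses that layout.  The
 * demo_* helpers below re-implement the byte shuffle purely for
 * demonstration and are not kernel functions.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void demo_pn2hdr(uint8_t *hdr, const uint8_t *pn, int key_id)
{
	hdr[0] = pn[5];			/* PN0 (least significant byte) */
	hdr[1] = pn[4];			/* PN1 */
	hdr[2] = 0;			/* reserved */
	hdr[3] = 0x20 | (key_id << 6);	/* ExtIV flag + key index */
	hdr[4] = pn[3];			/* PN2 */
	hdr[5] = pn[2];			/* PN3 */
	hdr[6] = pn[1];			/* PN4 */
	hdr[7] = pn[0];			/* PN5 (most significant byte) */
}

static void demo_hdr2pn(uint8_t *pn, const uint8_t *hdr)
{
	pn[0] = hdr[7];
	pn[1] = hdr[6];
	pn[2] = hdr[5];
	pn[3] = hdr[4];
	pn[4] = hdr[1];
	pn[5] = hdr[0];
}

int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x01, 0x02, 0x03 };
	uint8_t hdr[8], back[6];

	demo_pn2hdr(hdr, pn, 1);
	demo_hdr2pn(back, hdr);

	/* expect hdr[3] == 0x60 (ExtIV | keyid 1) and a clean round trip */
	printf("hdr[3]=0x%02x round-trip %s\n", hdr[3],
	       memcmp(pn, back, sizeof(pn)) ? "FAILED" : "ok");
	return 0;
}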
915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 | // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) B.A.T.M.A.N. contributors: * * Antonio Quartulli */ #include "bat_v_ogm.h" #include "main.h" #include <linux/atomic.h> #include <linux/byteorder/generic.h> #include <linux/container_of.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/gfp.h> #include <linux/if_ether.h> #include <linux/jiffies.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/minmax.h> #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/random.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/types.h> #include <linux/workqueue.h> #include <uapi/linux/batadv_packet.h> #include "hard-interface.h" #include "hash.h" #include "log.h" #include "originator.h" #include "routing.h" #include "send.h" #include "translation-table.h" #include "tvlv.h" /** * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node * @bat_priv: the bat priv with all the mesh interface information * @addr: the address of the originator * * Return: the orig_node corresponding to the specified address. If such an * object does not exist, it is allocated here. In case of allocation failure * returns NULL. 
*/ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) { struct batadv_orig_node *orig_node; int hash_added; orig_node = batadv_orig_hash_find(bat_priv, addr); if (orig_node) return orig_node; orig_node = batadv_orig_node_new(bat_priv, addr); if (!orig_node) return NULL; kref_get(&orig_node->refcount); hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, batadv_choose_orig, orig_node, &orig_node->hash_entry); if (hash_added != 0) { /* remove refcnt for newly created orig_node and hash entry */ batadv_orig_node_put(orig_node); batadv_orig_node_put(orig_node); orig_node = NULL; } return orig_node; } /** * batadv_v_ogm_start_queue_timer() - restart the OGM aggregation timer * @hard_iface: the interface to use to send the OGM */ static void batadv_v_ogm_start_queue_timer(struct batadv_hard_iface *hard_iface) { unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000; /* msecs * [0.9, 1.1] */ msecs += get_random_u32_below(msecs / 5) - (msecs / 10); queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq, msecs_to_jiffies(msecs / 1000)); } /** * batadv_v_ogm_start_timer() - restart the OGM sending timer * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv) { unsigned long msecs; /* this function may be invoked in different contexts (ogm rescheduling * or hard_iface activation), but the work timer should not be reset */ if (delayed_work_pending(&bat_priv->bat_v.ogm_wq)) return; msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER; msecs += get_random_u32_below(2 * BATADV_JITTER); queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq, msecs_to_jiffies(msecs)); } /** * batadv_v_ogm_send_to_if() - send a batman ogm using a given interface * @skb: the OGM to send * @hard_iface: the interface to use to send the OGM */ static void batadv_v_ogm_send_to_if(struct sk_buff *skb, struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); if (hard_iface->if_status != BATADV_IF_ACTIVE) { kfree_skb(skb); return; } batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES, skb->len + ETH_HLEN); batadv_send_broadcast_skb(skb, hard_iface); } /** * batadv_v_ogm_len() - OGMv2 packet length * @skb: the OGM to check * * Return: Length of the given OGMv2 packet, including tvlv length, excluding * ethernet header length. */ static unsigned int batadv_v_ogm_len(struct sk_buff *skb) { struct batadv_ogm2_packet *ogm_packet; ogm_packet = (struct batadv_ogm2_packet *)skb->data; return BATADV_OGM2_HLEN + ntohs(ogm_packet->tvlv_len); } /** * batadv_v_ogm_queue_left() - check if given OGM still fits aggregation queue * @skb: the OGM to check * @hard_iface: the interface to use to send the OGM * * Caller needs to hold the hard_iface->bat_v.aggr_list.lock. * * Return: True, if the given OGMv2 packet still fits, false otherwise. 
*/ static bool batadv_v_ogm_queue_left(struct sk_buff *skb, struct batadv_hard_iface *hard_iface) { unsigned int max = min_t(unsigned int, hard_iface->net_dev->mtu, BATADV_MAX_AGGREGATION_BYTES); unsigned int ogm_len = batadv_v_ogm_len(skb); lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock); return hard_iface->bat_v.aggr_len + ogm_len <= max; } /** * batadv_v_ogm_aggr_list_free - free all elements in an aggregation queue * @hard_iface: the interface holding the aggregation queue * * Empties the OGMv2 aggregation queue and frees all the skbs it contains. * * Caller needs to hold the hard_iface->bat_v.aggr_list.lock. */ static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface) { lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock); __skb_queue_purge(&hard_iface->bat_v.aggr_list); hard_iface->bat_v.aggr_len = 0; } /** * batadv_v_ogm_aggr_send() - flush & send aggregation queue * @hard_iface: the interface with the aggregation queue to flush * * Aggregates all OGMv2 packets currently in the aggregation queue into a * single OGMv2 packet and transmits this aggregate. * * The aggregation queue is empty after this call. * * Caller needs to hold the hard_iface->bat_v.aggr_list.lock. */ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface) { unsigned int aggr_len = hard_iface->bat_v.aggr_len; struct sk_buff *skb_aggr; unsigned int ogm_len; struct sk_buff *skb; lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock); if (!aggr_len) return; skb_aggr = dev_alloc_skb(aggr_len + ETH_HLEN + NET_IP_ALIGN); if (!skb_aggr) { batadv_v_ogm_aggr_list_free(hard_iface); return; } skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN); skb_reset_network_header(skb_aggr); while ((skb = __skb_dequeue(&hard_iface->bat_v.aggr_list))) { hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb); ogm_len = batadv_v_ogm_len(skb); skb_put_data(skb_aggr, skb->data, ogm_len); consume_skb(skb); } batadv_v_ogm_send_to_if(skb_aggr, hard_iface); } /** * batadv_v_ogm_queue_on_if() - queue a batman ogm on a given interface * @skb: the OGM to queue * @hard_iface: the interface to queue the OGM on */ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb, struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); if (!atomic_read(&bat_priv->aggregated_ogms)) { batadv_v_ogm_send_to_if(skb, hard_iface); return; } spin_lock_bh(&hard_iface->bat_v.aggr_list.lock); if (!batadv_v_ogm_queue_left(skb, hard_iface)) batadv_v_ogm_aggr_send(hard_iface); hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb); __skb_queue_tail(&hard_iface->bat_v.aggr_list, skb); spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock); } /** * batadv_v_ogm_send_meshif() - periodic worker broadcasting the own OGM * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_v_ogm_send_meshif(struct batadv_priv *bat_priv) { struct batadv_hard_iface *hard_iface; struct batadv_ogm2_packet *ogm_packet; struct sk_buff *skb, *skb_tmp; unsigned char *ogm_buff; struct list_head *iter; int ogm_buff_len; u16 tvlv_len = 0; int ret; lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex); if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) goto out; ogm_buff = bat_priv->bat_v.ogm_buff; ogm_buff_len = bat_priv->bat_v.ogm_buff_len; /* tt changes have to be committed before the tvlv data is * appended as it may alter the tt tvlv container */ batadv_tt_local_commit_changes(bat_priv); tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, &ogm_buff, &ogm_buff_len, 
BATADV_OGM2_HLEN); bat_priv->bat_v.ogm_buff = ogm_buff; bat_priv->bat_v.ogm_buff_len = ogm_buff_len; skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + ogm_buff_len); if (!skb) goto reschedule; skb_reserve(skb, ETH_HLEN); skb_put_data(skb, ogm_buff, ogm_buff_len); ogm_packet = (struct batadv_ogm2_packet *)skb->data; ogm_packet->seqno = htonl(atomic_read(&bat_priv->bat_v.ogm_seqno)); atomic_inc(&bat_priv->bat_v.ogm_seqno); ogm_packet->tvlv_len = htons(tvlv_len); /* broadcast on every interface */ rcu_read_lock(); netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) { if (!kref_get_unless_zero(&hard_iface->refcount)) continue; ret = batadv_hardif_no_broadcast(hard_iface, NULL, NULL); if (ret) { char *type; switch (ret) { case BATADV_HARDIF_BCAST_NORECIPIENT: type = "no neighbor"; break; case BATADV_HARDIF_BCAST_DUPFWD: type = "single neighbor is source"; break; case BATADV_HARDIF_BCAST_DUPORIG: type = "single neighbor is originator"; break; default: type = "unknown"; } batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 from ourselves on %s suppressed: %s\n", hard_iface->net_dev->name, type); batadv_hardif_put(hard_iface); continue; } batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n", ogm_packet->orig, ntohl(ogm_packet->seqno), ntohl(ogm_packet->throughput), ogm_packet->ttl, hard_iface->net_dev->name, hard_iface->net_dev->dev_addr); /* this skb gets consumed by batadv_v_ogm_send_to_if() */ skb_tmp = skb_clone(skb, GFP_ATOMIC); if (!skb_tmp) { batadv_hardif_put(hard_iface); break; } batadv_v_ogm_queue_on_if(skb_tmp, hard_iface); batadv_hardif_put(hard_iface); } rcu_read_unlock(); consume_skb(skb); reschedule: batadv_v_ogm_start_timer(bat_priv); out: return; } /** * batadv_v_ogm_send() - periodic worker broadcasting the own OGM * @work: work queue item */ static void batadv_v_ogm_send(struct work_struct *work) { struct batadv_priv_bat_v *bat_v; struct batadv_priv *bat_priv; bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work); bat_priv = container_of(bat_v, struct batadv_priv, bat_v); mutex_lock(&bat_priv->bat_v.ogm_buff_mutex); batadv_v_ogm_send_meshif(bat_priv); mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex); } /** * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface * @work: work queue item * * Emits aggregated OGM messages in regular intervals. */ void batadv_v_ogm_aggr_work(struct work_struct *work) { struct batadv_hard_iface_bat_v *batv; struct batadv_hard_iface *hard_iface; batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work); hard_iface = container_of(batv, struct batadv_hard_iface, bat_v); spin_lock_bh(&hard_iface->bat_v.aggr_list.lock); batadv_v_ogm_aggr_send(hard_iface); spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock); batadv_v_ogm_start_queue_timer(hard_iface); } /** * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V * @hard_iface: the interface to prepare * * Takes care of scheduling its own OGM sending routine for this interface. 
* * Return: 0 on success or a negative error code otherwise */ int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); batadv_v_ogm_start_queue_timer(hard_iface); batadv_v_ogm_start_timer(bat_priv); return 0; } /** * batadv_v_ogm_iface_disable() - release OGM interface private resources * @hard_iface: interface for which the resources have to be released */ void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface) { cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq); spin_lock_bh(&hard_iface->bat_v.aggr_list.lock); batadv_v_ogm_aggr_list_free(hard_iface); spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock); } /** * batadv_v_ogm_primary_iface_set() - set a new primary interface * @primary_iface: the new primary interface */ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface) { struct batadv_priv *bat_priv = netdev_priv(primary_iface->mesh_iface); struct batadv_ogm2_packet *ogm_packet; mutex_lock(&bat_priv->bat_v.ogm_buff_mutex); if (!bat_priv->bat_v.ogm_buff) goto unlock; ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff; ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr); unlock: mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex); } /** * batadv_v_forward_penalty() - apply a penalty to the throughput metric * forwarded with B.A.T.M.A.N. V OGMs * @bat_priv: the bat priv with all the mesh interface information * @if_incoming: the interface where the OGM has been received * @if_outgoing: the interface where the OGM has to be forwarded to * @throughput: the current throughput * * Apply a penalty on the current throughput metric value based on the * characteristic of the interface where the OGM has been received. * * Initially the per hardif hop penalty is applied to the throughput. After * that the return value is then computed as follows: * - throughput * 50% if the incoming and outgoing interface are the * same WiFi interface and the throughput is above * 1MBit/s * - throughput if the outgoing interface is the default * interface (i.e. this OGM is processed for the * internal table and not forwarded) * - throughput * node hop penalty otherwise * * Return: the penalised throughput metric. */ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, u32 throughput) { int if_hop_penalty = atomic_read(&if_incoming->hop_penalty); int hop_penalty = atomic_read(&bat_priv->hop_penalty); int hop_penalty_max = BATADV_TQ_MAX_VALUE; /* Apply per hardif hop penalty */ throughput = throughput * (hop_penalty_max - if_hop_penalty) / hop_penalty_max; /* Don't apply hop penalty in default originator table. */ if (if_outgoing == BATADV_IF_DEFAULT) return throughput; /* Forwarding on the same WiFi interface cuts the throughput in half * due to the store & forward characteristics of WIFI. * Very low throughput values are the exception. 
*/ if (throughput > 10 && if_incoming == if_outgoing && !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX)) return throughput / 2; /* hop penalty of 255 equals 100% */ return throughput * (hop_penalty_max - hop_penalty) / hop_penalty_max; } /** * batadv_v_ogm_forward() - check conditions and forward an OGM to the given * outgoing interface * @bat_priv: the bat priv with all the mesh interface information * @ogm_received: previously received OGM to be forwarded * @orig_node: the originator which has been updated * @neigh_node: the neigh_node through with the OGM has been received * @if_incoming: the interface on which this OGM was received on * @if_outgoing: the interface to which the OGM has to be forwarded to * * Forward an OGM to an interface after having altered the throughput metric and * the TTL value contained in it. The original OGM isn't modified. */ static void batadv_v_ogm_forward(struct batadv_priv *bat_priv, const struct batadv_ogm2_packet *ogm_received, struct batadv_orig_node *orig_node, struct batadv_neigh_node *neigh_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_ifinfo *neigh_ifinfo = NULL; struct batadv_orig_ifinfo *orig_ifinfo = NULL; struct batadv_neigh_node *router = NULL; struct batadv_ogm2_packet *ogm_forward; unsigned char *skb_buff; struct sk_buff *skb; size_t packet_len; u16 tvlv_len; /* only forward for specific interfaces, not for the default one. */ if (if_outgoing == BATADV_IF_DEFAULT) goto out; orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing); if (!orig_ifinfo) goto out; /* acquire possibly updated router */ router = batadv_orig_router_get(orig_node, if_outgoing); /* strict rule: forward packets coming from the best next hop only */ if (neigh_node != router) goto out; /* don't forward the same seqno twice on one interface */ if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm_received->seqno)) goto out; orig_ifinfo->last_seqno_forwarded = ntohl(ogm_received->seqno); if (ogm_received->ttl <= 1) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); goto out; } neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); if (!neigh_ifinfo) goto out; tvlv_len = ntohs(ogm_received->tvlv_len); packet_len = BATADV_OGM2_HLEN + tvlv_len; skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev, ETH_HLEN + packet_len); if (!skb) goto out; skb_reserve(skb, ETH_HLEN); skb_buff = skb_put_data(skb, ogm_received, packet_len); /* apply forward penalty */ ogm_forward = (struct batadv_ogm2_packet *)skb_buff; ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput); ogm_forward->ttl--; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Forwarding OGM2 packet on %s: throughput %u, ttl %u, received via %s\n", if_outgoing->net_dev->name, ntohl(ogm_forward->throughput), ogm_forward->ttl, if_incoming->net_dev->name); batadv_v_ogm_queue_on_if(skb, if_outgoing); out: batadv_orig_ifinfo_put(orig_ifinfo); batadv_neigh_node_put(router); batadv_neigh_ifinfo_put(neigh_ifinfo); } /** * batadv_v_ogm_metric_update() - update route metric based on OGM * @bat_priv: the bat priv with all the mesh interface information * @ogm2: OGM2 structure * @orig_node: Originator structure for which the OGM has been received * @neigh_node: the neigh_node through with the OGM has been received * @if_incoming: the interface where this packet was received * @if_outgoing: the interface for which the packet should be considered * * Return: * 1 if the OGM is new, * 0 if it is not new but valid, * <0 on error (e.g. 
old OGM) */ static int batadv_v_ogm_metric_update(struct batadv_priv *bat_priv, const struct batadv_ogm2_packet *ogm2, struct batadv_orig_node *orig_node, struct batadv_neigh_node *neigh_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_neigh_ifinfo *neigh_ifinfo = NULL; bool protection_started = false; int ret = -EINVAL; u32 path_throughput; s32 seq_diff; orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing); if (!orig_ifinfo) goto out; seq_diff = ntohl(ogm2->seqno) - orig_ifinfo->last_real_seqno; if (!hlist_empty(&orig_node->neigh_list) && batadv_window_protected(bat_priv, seq_diff, BATADV_OGM_MAX_AGE, &orig_ifinfo->batman_seqno_reset, &protection_started)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: packet within window protection time from %pM\n", ogm2->orig); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Last reset: %ld, %ld\n", orig_ifinfo->batman_seqno_reset, jiffies); goto out; } /* drop packets with old seqnos, however accept the first packet after * a host has been rebooted. */ if (seq_diff < 0 && !protection_started) goto out; neigh_node->last_seen = jiffies; orig_node->last_seen = jiffies; orig_ifinfo->last_real_seqno = ntohl(ogm2->seqno); orig_ifinfo->last_ttl = ogm2->ttl; neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); if (!neigh_ifinfo) goto out; path_throughput = batadv_v_forward_penalty(bat_priv, if_incoming, if_outgoing, ntohl(ogm2->throughput)); neigh_ifinfo->bat_v.throughput = path_throughput; neigh_ifinfo->bat_v.last_seqno = ntohl(ogm2->seqno); neigh_ifinfo->last_ttl = ogm2->ttl; if (seq_diff > 0 || protection_started) ret = 1; else ret = 0; out: batadv_orig_ifinfo_put(orig_ifinfo); batadv_neigh_ifinfo_put(neigh_ifinfo); return ret; } /** * batadv_v_ogm_route_update() - update routes based on OGM * @bat_priv: the bat priv with all the mesh interface information * @ethhdr: the Ethernet header of the OGM2 * @ogm2: OGM2 structure * @orig_node: Originator structure for which the OGM has been received * @neigh_node: the neigh_node through with the OGM has been received * @if_incoming: the interface where this packet was received * @if_outgoing: the interface for which the packet should be considered * * Return: true if the packet should be forwarded, false otherwise */ static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv, const struct ethhdr *ethhdr, const struct batadv_ogm2_packet *ogm2, struct batadv_orig_node *orig_node, struct batadv_neigh_node *neigh_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_node *router = NULL; struct batadv_orig_node *orig_neigh_node; struct batadv_neigh_node *orig_neigh_router = NULL; struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL; u32 router_throughput, neigh_throughput; u32 router_last_seqno; u32 neigh_last_seqno; s32 neigh_seq_diff; bool forward = false; orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_neigh_node) goto out; orig_neigh_router = batadv_orig_router_get(orig_neigh_node, if_outgoing); /* drop packet if sender is not a direct neighbor and if we * don't route towards it */ router = batadv_orig_router_get(orig_node, if_outgoing); if (router && router->orig_node != orig_node && !orig_neigh_router) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: OGM via unknown neighbor!\n"); goto out; } /* Mark the OGM to be considered for forwarding, and update routes * if needed. 
*/ forward = true; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Searching and updating originator entry of received packet\n"); /* if this neighbor already is our next hop there is nothing * to change */ if (router == neigh_node) goto out; /* don't consider neighbours with worse throughput. * also switch route if this seqno is BATADV_V_MAX_ORIGDIFF newer than * the last received seqno from our best next hop. */ if (router) { router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing); neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); /* if these are not allocated, something is wrong. */ if (!router_ifinfo || !neigh_ifinfo) goto out; neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno; router_last_seqno = router_ifinfo->bat_v.last_seqno; neigh_seq_diff = neigh_last_seqno - router_last_seqno; router_throughput = router_ifinfo->bat_v.throughput; neigh_throughput = neigh_ifinfo->bat_v.throughput; if (neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF && router_throughput >= neigh_throughput) goto out; } batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node); out: batadv_neigh_node_put(router); batadv_neigh_node_put(orig_neigh_router); batadv_orig_node_put(orig_neigh_node); batadv_neigh_ifinfo_put(router_ifinfo); batadv_neigh_ifinfo_put(neigh_ifinfo); return forward; } /** * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if * @bat_priv: the bat priv with all the mesh interface information * @ethhdr: the Ethernet header of the OGM2 * @ogm2: OGM2 structure * @orig_node: Originator structure for which the OGM has been received * @neigh_node: the neigh_node through with the OGM has been received * @if_incoming: the interface where this packet was received * @if_outgoing: the interface for which the packet should be considered */ static void batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv, const struct ethhdr *ethhdr, const struct batadv_ogm2_packet *ogm2, struct batadv_orig_node *orig_node, struct batadv_neigh_node *neigh_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { int seqno_age; bool forward; /* first, update the metric with according sanity checks */ seqno_age = batadv_v_ogm_metric_update(bat_priv, ogm2, orig_node, neigh_node, if_incoming, if_outgoing); /* outdated sequence numbers are to be discarded */ if (seqno_age < 0) return; /* only unknown & newer OGMs contain TVLVs we are interested in */ if (seqno_age > 0 && if_outgoing == BATADV_IF_DEFAULT) batadv_tvlv_containers_process(bat_priv, BATADV_OGM2, orig_node, NULL, (unsigned char *)(ogm2 + 1), ntohs(ogm2->tvlv_len)); /* if the metric update went through, update routes if needed */ forward = batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node, neigh_node, if_incoming, if_outgoing); /* if the routes have been processed correctly, check and forward */ if (forward) batadv_v_ogm_forward(bat_priv, ogm2, orig_node, neigh_node, if_incoming, if_outgoing); } /** * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated * @buff_pos: current position in the skb * @packet_len: total length of the skb * @ogm2_packet: potential OGM2 in buffer * * Return: true if there is enough space for another OGM, false otherwise. 
*/ static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, const struct batadv_ogm2_packet *ogm2_packet) { int next_buff_pos = 0; /* check if there is enough space for the header */ next_buff_pos += buff_pos + sizeof(*ogm2_packet); if (next_buff_pos > packet_len) return false; /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm2_packet->tvlv_len); return next_buff_pos <= packet_len; } /** * batadv_v_ogm_process() - process an incoming batman v OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) * @if_incoming: the interface where this packet was received */ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct ethhdr *ethhdr; struct batadv_orig_node *orig_node = NULL; struct batadv_hardif_neigh_node *hardif_neigh = NULL; struct batadv_neigh_node *neigh_node = NULL; struct batadv_hard_iface *hard_iface; struct batadv_ogm2_packet *ogm_packet; u32 ogm_throughput, link_throughput, path_throughput; struct list_head *iter; int ret; ethhdr = eth_hdr(skb); ogm_packet = (struct batadv_ogm2_packet *)(skb->data + ogm_offset); ogm_throughput = ntohl(ogm_packet->throughput); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Received OGM2 packet via NB: %pM, IF: %s [%pM] (from OG: %pM, seqno %u, throughput %u, TTL %u, V %u, tvlv_len %u)\n", ethhdr->h_source, if_incoming->net_dev->name, if_incoming->net_dev->dev_addr, ogm_packet->orig, ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len)); if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: originator packet from ourself\n"); return; } /* If the throughput metric is 0, immediately drop the packet. No need * to create orig_node / neigh_node for an unusable route. */ if (ogm_throughput == 0) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: originator packet with throughput metric of 0\n"); return; } /* require ELP packets be to received from this neighbor first */ hardif_neigh = batadv_hardif_neigh_get(if_incoming, ethhdr->h_source); if (!hardif_neigh) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: OGM via unknown neighbor!\n"); goto out; } orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig); if (!orig_node) goto out; neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming, ethhdr->h_source); if (!neigh_node) goto out; /* Update the received throughput metric to match the link * characteristic: * - If this OGM traveled one hop so far (emitted by single hop * neighbor) the path throughput metric equals the link throughput. * - For OGMs traversing more than hop the path throughput metric is * the smaller of the path throughput and the link throughput. 
*/ link_throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput); path_throughput = min_t(u32, link_throughput, ogm_throughput); ogm_packet->throughput = htonl(path_throughput); batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node, neigh_node, if_incoming, BATADV_IF_DEFAULT); rcu_read_lock(); netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) { if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) continue; ret = batadv_hardif_no_broadcast(hard_iface, ogm_packet->orig, hardif_neigh->orig); if (ret) { char *type; switch (ret) { case BATADV_HARDIF_BCAST_NORECIPIENT: type = "no neighbor"; break; case BATADV_HARDIF_BCAST_DUPFWD: type = "single neighbor is source"; break; case BATADV_HARDIF_BCAST_DUPORIG: type = "single neighbor is originator"; break; default: type = "unknown"; } batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 packet from %pM on %s suppressed: %s\n", ogm_packet->orig, hard_iface->net_dev->name, type); batadv_hardif_put(hard_iface); continue; } batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node, neigh_node, if_incoming, hard_iface); batadv_hardif_put(hard_iface); } rcu_read_unlock(); out: batadv_orig_node_put(orig_node); batadv_neigh_node_put(neigh_node); batadv_hardif_neigh_put(hardif_neigh); } /** * batadv_v_ogm_packet_recv() - OGM2 receiving handler * @skb: the received OGM * @if_incoming: the interface where this OGM has been received * * Return: NET_RX_SUCCESS and consume the skb on success or returns NET_RX_DROP * (without freeing the skb) on failure */ int batadv_v_ogm_packet_recv(struct sk_buff *skb, struct batadv_hard_iface *if_incoming) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_ogm2_packet *ogm_packet; struct ethhdr *ethhdr; int ogm_offset; u8 *packet_pos; int ret = NET_RX_DROP; /* did we receive a OGM2 packet on an interface that does not have * B.A.T.M.A.N. V enabled ? 
*/ if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0) goto free_skb; if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN)) goto free_skb; ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); ogm_offset = 0; ogm_packet = (struct batadv_ogm2_packet *)skb->data; while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), ogm_packet)) { batadv_v_ogm_process(skb, ogm_offset, if_incoming); ogm_offset += BATADV_OGM2_HLEN; ogm_offset += ntohs(ogm_packet->tvlv_len); packet_pos = skb->data + ogm_offset; ogm_packet = (struct batadv_ogm2_packet *)packet_pos; } ret = NET_RX_SUCCESS; free_skb: if (ret == NET_RX_SUCCESS) consume_skb(skb); else kfree_skb(skb); return ret; } /** * batadv_v_ogm_init() - initialise the OGM2 engine * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success or a negative error code in case of failure */ int batadv_v_ogm_init(struct batadv_priv *bat_priv) { struct batadv_ogm2_packet *ogm_packet; unsigned char *ogm_buff; u32 random_seqno; bat_priv->bat_v.ogm_buff_len = BATADV_OGM2_HLEN; ogm_buff = kzalloc(bat_priv->bat_v.ogm_buff_len, GFP_ATOMIC); if (!ogm_buff) return -ENOMEM; bat_priv->bat_v.ogm_buff = ogm_buff; ogm_packet = (struct batadv_ogm2_packet *)ogm_buff; ogm_packet->packet_type = BATADV_OGM2; ogm_packet->version = BATADV_COMPAT_VERSION; ogm_packet->ttl = BATADV_TTL; ogm_packet->flags = BATADV_NO_FLAGS; ogm_packet->throughput = htonl(BATADV_THROUGHPUT_MAX_VALUE); /* randomize initial seqno to avoid collision */ get_random_bytes(&random_seqno, sizeof(random_seqno)); atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno); INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send); mutex_init(&bat_priv->bat_v.ogm_buff_mutex); return 0; } /** * batadv_v_ogm_free() - free OGM private resources * @bat_priv: the bat priv with all the mesh interface information */ void batadv_v_ogm_free(struct batadv_priv *bat_priv) { cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq); mutex_lock(&bat_priv->bat_v.ogm_buff_mutex); kfree(bat_priv->bat_v.ogm_buff); bat_priv->bat_v.ogm_buff = NULL; bat_priv->bat_v.ogm_buff_len = 0; mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex); } |
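/*
 * Illustrative sketch (not part of batman-adv): the hop-penalty arithmetic
 * documented for batadv_v_forward_penalty() above, reduced to plain integer
 * math so it can be checked in isolation. The constant mirrors
 * BATADV_TQ_MAX_VALUE (255); the function and variable names below are
 * hypothetical and exist only for this example, and the real code also
 * checks the default-originator-table and full-duplex cases.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Scale a throughput value by (255 - penalty) / 255, as the OGM2 code does. */
static uint32_t example_apply_penalty(uint32_t throughput, int penalty)
{
	const int max = 255;	/* BATADV_TQ_MAX_VALUE */

	return throughput * (uint32_t)(max - penalty) / (uint32_t)max;
}

/*
 * Model of the forwarding decision: per-hardif penalty first, then either
 * halve (same half-duplex WiFi interface, throughput above the minimum) or
 * apply the node-wide hop penalty.
 */
static uint32_t example_forward_penalty(uint32_t throughput, int if_penalty,
					int node_penalty, bool same_wifi_if)
{
	throughput = example_apply_penalty(throughput, if_penalty);

	if (throughput > 10 && same_wifi_if)
		return throughput / 2;

	return example_apply_penalty(throughput, node_penalty);
}

int main(void)
{
	/* 100 units with a hop penalty of 30/255 keeps roughly 88%. */
	assert(example_apply_penalty(100, 30) == 88);
	/* Same half-duplex WiFi interface: store & forward halves it. */
	assert(example_forward_penalty(100, 0, 30, true) == 50);
	return 0;
}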
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Digianswer Bluetooth USB driver
 *
 *  Copyright (C) 2004-2007  Marcel Holtmann <marcel@holtmann.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#include <linux/usb.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.11"

static const struct usb_device_id bpa10x_table[] = {
	/* Tektronix BPA 100/105 (Digianswer) */
	{ USB_DEVICE(0x08fd, 0x0002) },

	{ }	/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, bpa10x_table);

struct bpa10x_data {
	struct hci_dev    *hdev;
	struct usb_device *udev;

	struct usb_anchor tx_anchor;
	struct usb_anchor rx_anchor;

	struct sk_buff *rx_skb[2];
};

static void bpa10x_tx_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	BT_DBG("%s urb %p status %d count %d", hdev->name,
	       urb, urb->status, urb->actual_length);

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	if (!urb->status)
		hdev->stat.byte_tx += urb->transfer_buffer_length;
	else
		hdev->stat.err_tx++;

done:
	kfree(urb->setup_packet);

	kfree_skb(skb);
}

#define HCI_VENDOR_HDR_SIZE 5

#define HCI_RECV_VENDOR \
	.type = HCI_VENDOR_PKT, \
	.hlen = HCI_VENDOR_HDR_SIZE, \
	.loff = 3, \
	.lsize = 2, \
	.maxlen = HCI_MAX_FRAME_SIZE

static const struct h4_recv_pkt bpa10x_recv_pkts[] = {
	{ H4_RECV_ACL,     .recv = hci_recv_frame },
	{ H4_RECV_SCO,     .recv = hci_recv_frame },
	{ H4_RECV_EVENT,   .recv = hci_recv_frame },
	{ HCI_RECV_VENDOR, .recv = hci_recv_diag  },
};

static void bpa10x_rx_complete(struct urb *urb)
{
	struct hci_dev *hdev = urb->context;
	struct bpa10x_data *data = hci_get_drvdata(hdev);
	int err;

	BT_DBG("%s urb %p status %d count %d",
hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { bool idx = usb_pipebulk(urb->pipe); data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx], urb->transfer_buffer, urb->actual_length, bpa10x_recv_pkts, ARRAY_SIZE(bpa10x_recv_pkts)); if (IS_ERR(data->rx_skb[idx])) { bt_dev_err(hdev, "corrupted event packet"); hdev->stat.err_rx++; data->rx_skb[idx] = NULL; } } usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { bt_dev_err(hdev, "urb %p failed to resubmit (%d)", urb, -err); usb_unanchor_urb(urb); } } static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = 16; BT_DBG("%s", hdev->name); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; buf = kmalloc(size, GFP_KERNEL); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvintpipe(data->udev, 0x81); usb_fill_int_urb(urb, data->udev, pipe, buf, size, bpa10x_rx_complete, hdev, 1); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = 64; BT_DBG("%s", hdev->name); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; buf = kmalloc(size, GFP_KERNEL); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvbulkpipe(data->udev, 0x82); usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, bpa10x_rx_complete, hdev); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static int bpa10x_open(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s", hdev->name); err = bpa10x_submit_intr_urb(hdev); if (err < 0) goto error; err = bpa10x_submit_bulk_urb(hdev); if (err < 0) goto error; return 0; error: usb_kill_anchored_urbs(&data->rx_anchor); return err; } static int bpa10x_close(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); usb_kill_anchored_urbs(&data->rx_anchor); return 0; } static int bpa10x_flush(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); usb_kill_anchored_urbs(&data->tx_anchor); return 0; } static int bpa10x_setup(struct hci_dev *hdev) { static const u8 req[] = { 0x07 }; struct sk_buff *skb; BT_DBG("%s", hdev->name); /* Read revision string */ skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); hci_set_fw_info(hdev, "%s", skb->data + 1); kfree_skb(skb); return 0; } static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct usb_ctrlrequest *dr; struct urb *urb; unsigned int pipe; int err; BT_DBG("%s", hdev->name); skb->dev = (void *) hdev; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; /* Prepend skb with frame type */ *(u8 *)skb_push(skb, 1) = 
hci_skb_pkt_type(skb); switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: dr = kmalloc(sizeof(*dr), GFP_KERNEL); if (!dr) { usb_free_urb(urb); return -ENOMEM; } dr->bRequestType = USB_TYPE_VENDOR; dr->bRequest = 0; dr->wIndex = 0; dr->wValue = 0; dr->wLength = __cpu_to_le16(skb->len); pipe = usb_sndctrlpipe(data->udev, 0x00); usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: pipe = usb_sndbulkpipe(data->udev, 0x02); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: pipe = usb_sndbulkpipe(data->udev, 0x02); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.sco_tx++; break; default: usb_free_urb(urb); return -EILSEQ; } usb_anchor_urb(urb, &data->tx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { bt_dev_err(hdev, "urb %p submission failed", urb); kfree(urb->setup_packet); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static int bpa10x_set_diag(struct hci_dev *hdev, bool enable) { const u8 req[] = { 0x00, enable }; struct sk_buff *skb; BT_DBG("%s", hdev->name); if (!test_bit(HCI_RUNNING, &hdev->flags)) return -ENETDOWN; /* Enable sniffer operation */ skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct bpa10x_data *data; struct hci_dev *hdev; int err; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->udev = interface_to_usbdev(intf); init_usb_anchor(&data->tx_anchor); init_usb_anchor(&data->rx_anchor); hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); data->hdev = hdev; SET_HCIDEV_DEV(hdev, &intf->dev); hdev->open = bpa10x_open; hdev->close = bpa10x_close; hdev->flush = bpa10x_flush; hdev->setup = bpa10x_setup; hdev->send = bpa10x_send_frame; hdev->set_diag = bpa10x_set_diag; hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE); err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); return err; } usb_set_intfdata(intf, data); return 0; } static void bpa10x_disconnect(struct usb_interface *intf) { struct bpa10x_data *data = usb_get_intfdata(intf); BT_DBG("intf %p", intf); if (!data) return; usb_set_intfdata(intf, NULL); hci_unregister_dev(data->hdev); hci_free_dev(data->hdev); kfree_skb(data->rx_skb[0]); kfree_skb(data->rx_skb[1]); } static struct usb_driver bpa10x_driver = { .name = "bpa10x", .probe = bpa10x_probe, .disconnect = bpa10x_disconnect, .id_table = bpa10x_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(bpa10x_driver); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); |
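/*
 * Illustrative sketch (not from the driver): how the HCI_RECV_VENDOR
 * descriptor above maps onto the wire format. h4_recv_buf() reads .lsize
 * bytes at offset .loff to find the payload length; for the Digianswer
 * vendor packets that is assumed to be a little-endian u16 at offset 3 of
 * a 5-byte header, matching the other HCI headers. The names below are
 * hypothetical and only demonstrate the offsets.
 */
#include <stddef.h>
#include <stdint.h>

#define EX_VENDOR_HDR_SIZE 5	/* HCI_VENDOR_HDR_SIZE */
#define EX_VENDOR_LEN_OFF  3	/* .loff */
#define EX_VENDOR_LEN_SIZE 2	/* .lsize */

/* Return total packet length (header + payload), or 0 if truncated. */
static size_t example_vendor_pkt_len(const uint8_t *buf, size_t count)
{
	uint16_t payload;

	if (count < EX_VENDOR_HDR_SIZE)
		return 0;

	payload = (uint16_t)buf[EX_VENDOR_LEN_OFF] |
		  ((uint16_t)buf[EX_VENDOR_LEN_OFF + 1] << 8);

	return EX_VENDOR_HDR_SIZE + payload;
}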
/* License: GPL */

#include <linux/filter.h>
#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
#include <net/sock.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/nospec.h>
#include <linux/cookie.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct sock_diag_handler __rcu *sock_diag_handlers[AF_MAX];

static const struct sock_diag_inet_compat __rcu *inet_rcv_compat;

static struct workqueue_struct *broadcast_wq;

DEFINE_COOKIE(sock_cookie);

u64 __sock_gen_cookie(struct sock *sk)
{
	u64 res = atomic64_read(&sk->sk_cookie);

	if (!res) {
		u64 new = gen_cookie_next(&sock_cookie);

		atomic64_cmpxchg(&sk->sk_cookie, res, new);

		/* Another thread might have changed sk_cookie before us.
*/ res = atomic64_read(&sk->sk_cookie); } return res; } int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie) { u64 res; if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE) return 0; res = sock_gen_cookie(sk); if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1]) return -ESTALE; return 0; } EXPORT_SYMBOL_GPL(sock_diag_check_cookie); void sock_diag_save_cookie(struct sock *sk, __u32 *cookie) { u64 res = sock_gen_cookie(sk); cookie[0] = (u32)res; cookie[1] = (u32)(res >> 32); } EXPORT_SYMBOL_GPL(sock_diag_save_cookie); int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) { u32 mem[SK_MEMINFO_VARS]; sk_get_meminfo(sk, mem); return nla_put(skb, attrtype, sizeof(mem), &mem); } EXPORT_SYMBOL_GPL(sock_diag_put_meminfo); int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, struct sk_buff *skb, int attrtype) { struct sock_fprog_kern *fprog; struct sk_filter *filter; struct nlattr *attr; unsigned int flen; int err = 0; if (!may_report_filterinfo) { nla_reserve(skb, attrtype, 0); return 0; } rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (!filter) goto out; fprog = filter->prog->orig_prog; if (!fprog) goto out; flen = bpf_classic_proglen(fprog); attr = nla_reserve(skb, attrtype, flen); if (attr == NULL) { err = -EMSGSIZE; goto out; } memcpy(nla_data(attr), fprog->filter, flen); out: rcu_read_unlock(); return err; } EXPORT_SYMBOL(sock_diag_put_filterinfo); struct broadcast_sk { struct sock *sk; struct work_struct work; }; static size_t sock_diag_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct inet_diag_msg) + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */ + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */ } static const struct sock_diag_handler *sock_diag_lock_handler(int family) { const struct sock_diag_handler *handler; rcu_read_lock(); handler = rcu_dereference(sock_diag_handlers[family]); if (handler && !try_module_get(handler->owner)) handler = NULL; rcu_read_unlock(); return handler; } static void sock_diag_unlock_handler(const struct sock_diag_handler *handler) { module_put(handler->owner); } static void sock_diag_broadcast_destroy_work(struct work_struct *work) { struct broadcast_sk *bsk = container_of(work, struct broadcast_sk, work); struct sock *sk = bsk->sk; const struct sock_diag_handler *hndl; struct sk_buff *skb; const enum sknetlink_groups group = sock_diag_destroy_group(sk); int err = -1; WARN_ON(group == SKNLGRP_NONE); skb = nlmsg_new(sock_diag_nlmsg_size(), GFP_KERNEL); if (!skb) goto out; hndl = sock_diag_lock_handler(sk->sk_family); if (hndl) { if (hndl->get_info) err = hndl->get_info(skb, sk); sock_diag_unlock_handler(hndl); } if (!err) nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group, GFP_KERNEL); else kfree_skb(skb); out: sk_destruct(sk); kfree(bsk); } void sock_diag_broadcast_destroy(struct sock *sk) { /* Note, this function is often called from an interrupt context. 
*/ struct broadcast_sk *bsk = kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC); if (!bsk) return sk_destruct(sk); bsk->sk = sk; INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work); queue_work(broadcast_wq, &bsk->work); } void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr) { xchg(&inet_rcv_compat, RCU_INITIALIZER(ptr)); } EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat); void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr) { const struct sock_diag_inet_compat *old; old = unrcu_pointer(xchg(&inet_rcv_compat, NULL)); WARN_ON_ONCE(old != ptr); } EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); int sock_diag_register(const struct sock_diag_handler *hndl) { int family = hndl->family; if (family >= AF_MAX) return -EINVAL; return !cmpxchg((const struct sock_diag_handler **) &sock_diag_handlers[family], NULL, hndl) ? 0 : -EBUSY; } EXPORT_SYMBOL_GPL(sock_diag_register); void sock_diag_unregister(const struct sock_diag_handler *hndl) { int family = hndl->family; if (family >= AF_MAX) return; xchg((const struct sock_diag_handler **)&sock_diag_handlers[family], NULL); } EXPORT_SYMBOL_GPL(sock_diag_unregister); static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh) { int err; struct sock_diag_req *req = nlmsg_data(nlh); const struct sock_diag_handler *hndl; if (nlmsg_len(nlh) < sizeof(*req)) return -EINVAL; if (req->sdiag_family >= AF_MAX) return -EINVAL; req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX); if (!rcu_access_pointer(sock_diag_handlers[req->sdiag_family])) sock_load_diag_module(req->sdiag_family, 0); hndl = sock_diag_lock_handler(req->sdiag_family); if (hndl == NULL) return -ENOENT; if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY) err = hndl->dump(skb, nlh); else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy) err = hndl->destroy(skb, nlh); else err = -EOPNOTSUPP; sock_diag_unlock_handler(hndl); return err; } static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { const struct sock_diag_inet_compat *ptr; int ret; switch (nlh->nlmsg_type) { case TCPDIAG_GETSOCK: if (!rcu_access_pointer(inet_rcv_compat)) sock_load_diag_module(AF_INET, 0); rcu_read_lock(); ptr = rcu_dereference(inet_rcv_compat); if (ptr && !try_module_get(ptr->owner)) ptr = NULL; rcu_read_unlock(); ret = -EOPNOTSUPP; if (ptr) { ret = ptr->fn(skb, nlh); module_put(ptr->owner); } return ret; case SOCK_DIAG_BY_FAMILY: case SOCK_DESTROY: return __sock_diag_cmd(skb, nlh); default: return -EINVAL; } } static void sock_diag_rcv(struct sk_buff *skb) { netlink_rcv_skb(skb, &sock_diag_rcv_msg); } static int sock_diag_bind(struct net *net, int group) { switch (group) { case SKNLGRP_INET_TCP_DESTROY: case SKNLGRP_INET_UDP_DESTROY: if (!rcu_access_pointer(sock_diag_handlers[AF_INET])) sock_load_diag_module(AF_INET, 0); break; case SKNLGRP_INET6_TCP_DESTROY: case SKNLGRP_INET6_UDP_DESTROY: if (!rcu_access_pointer(sock_diag_handlers[AF_INET6])) sock_load_diag_module(AF_INET6, 0); break; } return 0; } int sock_diag_destroy(struct sock *sk, int err) { if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; if (!sk->sk_prot->diag_destroy) return -EOPNOTSUPP; return sk->sk_prot->diag_destroy(sk, err); } EXPORT_SYMBOL_GPL(sock_diag_destroy); static int __net_init diag_net_init(struct net *net) { struct netlink_kernel_cfg cfg = { .groups = SKNLGRP_MAX, .input = sock_diag_rcv, .bind = sock_diag_bind, .flags = NL_CFG_F_NONROOT_RECV, }; net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, 
&cfg); return net->diag_nlsk == NULL ? -ENOMEM : 0; } static void __net_exit diag_net_exit(struct net *net) { netlink_kernel_release(net->diag_nlsk); net->diag_nlsk = NULL; } static struct pernet_operations diag_net_ops = { .init = diag_net_init, .exit = diag_net_exit, }; static int __init sock_diag_init(void) { broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0); BUG_ON(!broadcast_wq); return register_pernet_subsys(&diag_net_ops); } device_initcall(sock_diag_init); |
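/*
 * Illustrative sketch (userspace analogue, not kernel code): the
 * assign-once pattern used by __sock_gen_cookie() above. Several threads
 * may race to publish a cookie; a compare-and-swap from 0 guarantees that
 * only one value ever wins, and every caller re-reads after the attempt
 * so they all return the same cookie. All names below are hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t example_cookie_gen = 1;

static uint64_t example_gen_cookie(_Atomic uint64_t *slot)
{
	uint64_t res = atomic_load(slot);

	if (!res) {
		uint64_t expected = 0;
		uint64_t fresh = atomic_fetch_add(&example_cookie_gen, 1);

		/* Only the first caller installs its value ... */
		atomic_compare_exchange_strong(slot, &expected, fresh);
		/* ... but everyone returns whatever actually got installed. */
		res = atomic_load(slot);
	}

	return res;
}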
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* audit.h -- Auditing support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Written by Rickard E.
(Rik) Faith <faith@redhat.com> */ #ifndef _LINUX_AUDIT_H_ #define _LINUX_AUDIT_H_ #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/audit_arch.h> #include <uapi/linux/audit.h> #include <uapi/linux/netfilter/nf_tables.h> #include <uapi/linux/fanotify.h> #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) struct audit_sig_info { uid_t uid; pid_t pid; char ctx[]; }; struct audit_buffer; struct audit_context; struct inode; struct netlink_skb_parms; struct path; struct linux_binprm; struct mq_attr; struct mqstat; struct audit_watch; struct audit_tree; struct sk_buff; struct kern_ipc_perm; struct audit_krule { u32 pflags; u32 flags; u32 listnr; u32 action; u32 mask[AUDIT_BITMASK_SIZE]; u32 buflen; /* for data alloc on list rules */ u32 field_count; char *filterkey; /* ties events to rules */ struct audit_field *fields; struct audit_field *arch_f; /* quick access to arch field */ struct audit_field *inode_f; /* quick access to an inode field */ struct audit_watch *watch; /* associated watch */ struct audit_tree *tree; /* associated watched tree */ struct audit_fsnotify_mark *exe; struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ struct list_head list; /* for AUDIT_LIST* purposes only */ u64 prio; }; /* Flag to indicate legacy AUDIT_LOGINUID unset usage */ #define AUDIT_LOGINUID_LEGACY 0x1 struct audit_field { u32 type; union { u32 val; kuid_t uid; kgid_t gid; struct { char *lsm_str; void *lsm_rule; }; }; u32 op; }; enum audit_ntp_type { AUDIT_NTP_OFFSET, AUDIT_NTP_FREQ, AUDIT_NTP_STATUS, AUDIT_NTP_TAI, AUDIT_NTP_TICK, AUDIT_NTP_ADJUST, AUDIT_NTP_NVALS /* count */ }; #ifdef CONFIG_AUDITSYSCALL struct audit_ntp_val { long long oldval, newval; }; struct audit_ntp_data { struct audit_ntp_val vals[AUDIT_NTP_NVALS]; }; #else struct audit_ntp_data {}; #endif enum audit_nfcfgop { AUDIT_XT_OP_REGISTER, AUDIT_XT_OP_REPLACE, AUDIT_XT_OP_UNREGISTER, AUDIT_NFT_OP_TABLE_REGISTER, AUDIT_NFT_OP_TABLE_UNREGISTER, AUDIT_NFT_OP_CHAIN_REGISTER, AUDIT_NFT_OP_CHAIN_UNREGISTER, AUDIT_NFT_OP_RULE_REGISTER, AUDIT_NFT_OP_RULE_UNREGISTER, AUDIT_NFT_OP_SET_REGISTER, AUDIT_NFT_OP_SET_UNREGISTER, AUDIT_NFT_OP_SETELEM_REGISTER, AUDIT_NFT_OP_SETELEM_UNREGISTER, AUDIT_NFT_OP_GEN_REGISTER, AUDIT_NFT_OP_OBJ_REGISTER, AUDIT_NFT_OP_OBJ_UNREGISTER, AUDIT_NFT_OP_OBJ_RESET, AUDIT_NFT_OP_FLOWTABLE_REGISTER, AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, AUDIT_NFT_OP_SETELEM_RESET, AUDIT_NFT_OP_RULE_RESET, AUDIT_NFT_OP_INVALID, }; extern int __init audit_register_class(int class, unsigned *list); extern int audit_classify_syscall(int abi, unsigned syscall); extern int audit_classify_arch(int arch); /* only for compat system calls */ extern unsigned compat_write_class[]; extern unsigned compat_read_class[]; extern unsigned compat_dir_class[]; extern unsigned compat_chattr_class[]; extern unsigned compat_signal_class[]; /* audit_names->type values */ #define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ #define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */ #define AUDIT_TYPE_PARENT 2 /* a parent audit record */ #define AUDIT_TYPE_CHILD_DELETE 3 /* a child being deleted */ #define AUDIT_TYPE_CHILD_CREATE 4 /* a child being created */ /* maximized args number that audit_socketcall can process */ #define AUDITSC_ARGS 6 /* bit values for ->signal->audit_tty */ #define AUDIT_TTY_ENABLE BIT(0) #define AUDIT_TTY_LOG_PASSWD BIT(1) struct filename; #define AUDIT_OFF 0 #define AUDIT_ON 1 #define AUDIT_LOCKED 2 #ifdef CONFIG_AUDIT /* These are defined in audit.c */ /* Public API */ extern 
__printf(4, 5) void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...); extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); extern __printf(2, 3) void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); extern void audit_log_end(struct audit_buffer *ab); extern bool audit_string_contains_control(const char *string, size_t len); extern void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, size_t len); extern void audit_log_n_string(struct audit_buffer *ab, const char *buf, size_t n); extern void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, size_t n); extern void audit_log_untrustedstring(struct audit_buffer *ab, const char *string); extern void audit_log_d_path(struct audit_buffer *ab, const char *prefix, const struct path *path); extern void audit_log_key(struct audit_buffer *ab, char *key); extern void audit_log_path_denied(int type, const char *operation); extern void audit_log_lost(const char *message); extern int audit_log_task_context(struct audit_buffer *ab); extern void audit_log_task_info(struct audit_buffer *ab); extern int audit_update_lsm_rules(void); /* Private API (for audit.c only) */ extern int audit_rule_change(int type, int seq, void *data, size_t datasz); extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); extern int audit_set_loginuid(kuid_t loginuid); static inline kuid_t audit_get_loginuid(struct task_struct *tsk) { return tsk->loginuid; } static inline unsigned int audit_get_sessionid(struct task_struct *tsk) { return tsk->sessionid; } extern u32 audit_enabled; extern int audit_signal_info(int sig, struct task_struct *t); #else /* CONFIG_AUDIT */ static inline __printf(4, 5) void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...) { } static inline struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type) { return NULL; } static inline __printf(2, 3) void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) 
{ } static inline void audit_log_end(struct audit_buffer *ab) { } static inline void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, size_t len) { } static inline void audit_log_n_string(struct audit_buffer *ab, const char *buf, size_t n) { } static inline void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, size_t n) { } static inline void audit_log_untrustedstring(struct audit_buffer *ab, const char *string) { } static inline void audit_log_d_path(struct audit_buffer *ab, const char *prefix, const struct path *path) { } static inline void audit_log_key(struct audit_buffer *ab, char *key) { } static inline void audit_log_path_denied(int type, const char *operation) { } static inline int audit_log_task_context(struct audit_buffer *ab) { return 0; } static inline void audit_log_task_info(struct audit_buffer *ab) { } static inline kuid_t audit_get_loginuid(struct task_struct *tsk) { return INVALID_UID; } static inline unsigned int audit_get_sessionid(struct task_struct *tsk) { return AUDIT_SID_UNSET; } #define audit_enabled AUDIT_OFF static inline int audit_signal_info(int sig, struct task_struct *t) { return 0; } #endif /* CONFIG_AUDIT */ #ifdef CONFIG_AUDIT_COMPAT_GENERIC #define audit_is_compat(arch) (!((arch) & __AUDIT_ARCH_64BIT)) #else #define audit_is_compat(arch) false #endif #define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ #define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ #ifdef CONFIG_AUDITSYSCALL #include <asm/syscall.h> /* for syscall_get_arch() */ /* These are defined in auditsc.c */ /* Public API */ extern int audit_alloc(struct task_struct *task); extern void __audit_free(struct task_struct *task); extern void __audit_uring_entry(u8 op); extern void __audit_uring_exit(int success, long code); extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3); extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); extern void __audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type); extern void audit_seccomp(unsigned long syscall, long signr, int code); extern void audit_seccomp_actions_logged(const char *names, const char *old_names, int res); extern void __audit_ptrace(struct task_struct *t); static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) { task->audit_context = ctx; } static inline struct audit_context *audit_context(void) { return current->audit_context; } static inline bool audit_dummy_context(void) { void *p = audit_context(); return !p || *(int *)p; } static inline void audit_free(struct task_struct *task) { if (unlikely(task->audit_context)) __audit_free(task); } static inline void audit_uring_entry(u8 op) { /* * We intentionally check audit_context() before audit_enabled as most * Linux systems (as of ~2021) rely on systemd which forces audit to * be enabled regardless of the user's audit configuration. 
*/ if (unlikely(audit_context() && audit_enabled)) __audit_uring_entry(op); } static inline void audit_uring_exit(int success, long code) { if (unlikely(audit_context())) __audit_uring_exit(success, code); } static inline void audit_syscall_entry(int major, unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3) { if (unlikely(audit_context())) __audit_syscall_entry(major, a0, a1, a2, a3); } static inline void audit_syscall_exit(void *pt_regs) { if (unlikely(audit_context())) { int success = is_syscall_success(pt_regs); long return_code = regs_return_value(pt_regs); __audit_syscall_exit(success, return_code); } } static inline struct filename *audit_reusename(const __user char *name) { if (unlikely(!audit_dummy_context())) return __audit_reusename(name); return NULL; } static inline void audit_getname(struct filename *name) { if (unlikely(!audit_dummy_context())) __audit_getname(name); } static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int aflags) { if (unlikely(!audit_dummy_context())) __audit_inode(name, dentry, aflags); } static inline void audit_file(struct file *file) { if (unlikely(!audit_dummy_context())) __audit_file(file); } static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { if (unlikely(!audit_dummy_context())) __audit_inode(name, dentry, AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN); } static inline void audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { if (unlikely(!audit_dummy_context())) __audit_inode_child(parent, dentry, type); } void audit_core_dumps(long signr); static inline void audit_ptrace(struct task_struct *t) { if (unlikely(!audit_dummy_context())) __audit_ptrace(t); } /* Private API (for audit.c only) */ extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); extern void __audit_bprm(struct linux_binprm *bprm); extern int __audit_socketcall(int nargs, unsigned long *args); extern int __audit_sockaddr(int len, void *addr); extern void __audit_fd_pair(int fd1, int fd2); extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout); extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old); extern void __audit_log_capset(const struct cred *new, const struct cred *old); extern void __audit_mmap_fd(int fd, int flags); extern void __audit_openat2_how(struct open_how *how); extern void __audit_log_kern_module(const char *name); extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar); extern void __audit_tk_injoffset(struct timespec64 offset); extern void __audit_ntp_log(const struct audit_ntp_data *ad); extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, enum audit_nfcfgop op, gfp_t gfp); static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { if (unlikely(!audit_dummy_context())) __audit_ipc_obj(ipcp); } static inline void audit_fd_pair(int fd1, int fd2) { if (unlikely(!audit_dummy_context())) __audit_fd_pair(fd1, fd2); } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { if 
(unlikely(!audit_dummy_context())) __audit_ipc_set_perm(qbytes, uid, gid, mode); } static inline void audit_bprm(struct linux_binprm *bprm) { if (unlikely(!audit_dummy_context())) __audit_bprm(bprm); } static inline int audit_socketcall(int nargs, unsigned long *args) { if (unlikely(!audit_dummy_context())) return __audit_socketcall(nargs, args); return 0; } static inline int audit_socketcall_compat(int nargs, u32 *args) { unsigned long a[AUDITSC_ARGS]; int i; if (audit_dummy_context()) return 0; for (i = 0; i < nargs; i++) a[i] = (unsigned long)args[i]; return __audit_socketcall(nargs, a); } static inline int audit_sockaddr(int len, void *addr) { if (unlikely(!audit_dummy_context())) return __audit_sockaddr(len, addr); return 0; } static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { if (unlikely(!audit_dummy_context())) __audit_mq_open(oflag, mode, attr); } static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout) { if (unlikely(!audit_dummy_context())) __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); } static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { if (unlikely(!audit_dummy_context())) __audit_mq_notify(mqdes, notification); } static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { if (unlikely(!audit_dummy_context())) __audit_mq_getsetattr(mqdes, mqstat); } static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old) { if (unlikely(!audit_dummy_context())) return __audit_log_bprm_fcaps(bprm, new, old); return 0; } static inline void audit_log_capset(const struct cred *new, const struct cred *old) { if (unlikely(!audit_dummy_context())) __audit_log_capset(new, old); } static inline void audit_mmap_fd(int fd, int flags) { if (unlikely(!audit_dummy_context())) __audit_mmap_fd(fd, flags); } static inline void audit_openat2_how(struct open_how *how) { if (unlikely(!audit_dummy_context())) __audit_openat2_how(how); } static inline void audit_log_kern_module(const char *name) { if (!audit_dummy_context()) __audit_log_kern_module(name); } static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar) { if (audit_enabled) __audit_fanotify(response, friar); } static inline void audit_tk_injoffset(struct timespec64 offset) { /* ignore no-op events */ if (offset.tv_sec == 0 && offset.tv_nsec == 0) return; if (!audit_dummy_context()) __audit_tk_injoffset(offset); } static inline void audit_ntp_init(struct audit_ntp_data *ad) { memset(ad, 0, sizeof(*ad)); } static inline void audit_ntp_set_old(struct audit_ntp_data *ad, enum audit_ntp_type type, long long val) { ad->vals[type].oldval = val; } static inline void audit_ntp_set_new(struct audit_ntp_data *ad, enum audit_ntp_type type, long long val) { ad->vals[type].newval = val; } static inline void audit_ntp_log(const struct audit_ntp_data *ad) { if (!audit_dummy_context()) __audit_ntp_log(ad); } static inline void audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, enum audit_nfcfgop op, gfp_t gfp) { if (audit_enabled) __audit_log_nfcfg(name, af, nentries, op, gfp); } extern int audit_n_rules; extern int audit_signals; #else /* CONFIG_AUDITSYSCALL */ static inline int audit_alloc(struct task_struct *task) { return 0; } static inline void audit_free(struct task_struct *task) { } static inline void audit_uring_entry(u8 op) { } static inline void audit_uring_exit(int success, long code) { 
} static inline void audit_syscall_entry(int major, unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3) { } static inline void audit_syscall_exit(void *pt_regs) { } static inline bool audit_dummy_context(void) { return true; } static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx) { } static inline struct audit_context *audit_context(void) { return NULL; } static inline struct filename *audit_reusename(const __user char *name) { return NULL; } static inline void audit_getname(struct filename *name) { } static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int aflags) { } static inline void audit_file(struct file *file) { } static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { } static inline void audit_inode_child(struct inode *parent, const struct dentry *dentry, const unsigned char type) { } static inline void audit_core_dumps(long signr) { } static inline void audit_seccomp(unsigned long syscall, long signr, int code) { } static inline void audit_seccomp_actions_logged(const char *names, const char *old_names, int res) { } static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { } static inline void audit_bprm(struct linux_binprm *bprm) { } static inline int audit_socketcall(int nargs, unsigned long *args) { return 0; } static inline int audit_socketcall_compat(int nargs, u32 *args) { return 0; } static inline void audit_fd_pair(int fd1, int fd2) { } static inline int audit_sockaddr(int len, void *addr) { return 0; } static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { } static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout) { } static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { } static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { } static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old) { return 0; } static inline void audit_log_capset(const struct cred *new, const struct cred *old) { } static inline void audit_mmap_fd(int fd, int flags) { } static inline void audit_openat2_how(struct open_how *how) { } static inline void audit_log_kern_module(const char *name) { } static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar) { } static inline void audit_tk_injoffset(struct timespec64 offset) { } static inline void audit_ntp_init(struct audit_ntp_data *ad) { } static inline void audit_ntp_set_old(struct audit_ntp_data *ad, enum audit_ntp_type type, long long val) { } static inline void audit_ntp_set_new(struct audit_ntp_data *ad, enum audit_ntp_type type, long long val) { } static inline void audit_ntp_log(const struct audit_ntp_data *ad) { } static inline void audit_ptrace(struct task_struct *t) { } static inline void audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, enum audit_nfcfgop op, gfp_t gfp) { } #define audit_n_rules 0 #define audit_signals 0 #endif /* CONFIG_AUDITSYSCALL */ static inline bool audit_loginuid_set(struct task_struct *tsk) { return uid_valid(audit_get_loginuid(tsk)); } #endif |
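/*
 * Illustrative sketch (hypothetical caller, not kernel code): how the
 * audit_ntp_* helpers declared above are meant to be driven. Old values
 * are captured before a clock adjustment, new values afterwards, and one
 * record is emitted at the end; when audit support is compiled out every
 * call collapses to a no-op. The frequency field here is a stand-in for
 * whatever the real adjustment code touches.
 */
#include <linux/audit.h>

static inline void example_adjust_freq(struct audit_ntp_data *ad,
				       long long *freq, long long newfreq)
{
	audit_ntp_set_old(ad, AUDIT_NTP_FREQ, *freq);
	*freq = newfreq;
	audit_ntp_set_new(ad, AUDIT_NTP_FREQ, *freq);
}

static inline void example_adjtime(long long *freq, long long newfreq)
{
	struct audit_ntp_data ad;

	audit_ntp_init(&ad);
	example_adjust_freq(&ad, freq, newfreq);
	audit_ntp_log(&ad);	/* emits the record built up above */
}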
#ifndef IOU_CORE_H #define IOU_CORE_H #include <linux/errno.h> #include <linux/lockdep.h> #include <linux/resume_user_mode.h> #include <linux/kasan.h> #include <linux/poll.h> #include <linux/io_uring_types.h> #include <uapi/linux/eventpoll.h> #include "alloc_cache.h" #include "io-wq.h" #include "slist.h" #include "filetable.h" #include "opdef.h" #ifndef CREATE_TRACE_POINTS #include <trace/events/io_uring.h> #endif enum { IOU_COMPLETE = 0, IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED, /* * The request has more work to do and should be retried. io_uring will * attempt to wait on the file for eligible opcodes, but otherwise * it'll be handed to iowq for blocking execution. It works for normal * requests as well as for the multi shot mode. */ IOU_RETRY = -EAGAIN, /* * Requeue the task_work to restart operations on this request. The * actual value isn't important, should just be not an otherwise * valid error code, yet less than -MAX_ERRNO and valid internally.
*/ IOU_REQUEUE = -3072, }; struct io_wait_queue { struct wait_queue_entry wq; struct io_ring_ctx *ctx; unsigned cq_tail; unsigned cq_min_tail; unsigned nr_timeouts; int hit_timeout; ktime_t min_timeout; ktime_t timeout; struct hrtimer t; #ifdef CONFIG_NET_RX_BUSY_POLL ktime_t napi_busy_poll_dt; bool napi_prefer_busy_poll; #endif }; static inline bool io_should_wake(struct io_wait_queue *iowq) { struct io_ring_ctx *ctx = iowq->ctx; int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail; /* * Wake up if we have enough events, or if a timeout occurred since we * started waiting. For timeouts, we always want to return to userspace, * regardless of event count. */ return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; } #define IORING_MAX_ENTRIES 32768 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES) unsigned long rings_size(unsigned int flags, unsigned int sq_entries, unsigned int cq_entries, size_t *sq_offset); int io_uring_fill_params(unsigned entries, struct io_uring_params *p); bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32); int io_run_task_work_sig(struct io_ring_ctx *ctx); void io_req_defer_failed(struct io_kiocb *req, s32 res); bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags); void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags); bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags); bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe src_cqe[2]); void __io_commit_cqring_flush(struct io_ring_ctx *ctx); void io_req_track_inflight(struct io_kiocb *req); struct file *io_file_get_normal(struct io_kiocb *req, int fd); struct file *io_file_get_fixed(struct io_kiocb *req, int fd, unsigned issue_flags); void __io_req_task_work_add(struct io_kiocb *req, unsigned flags); void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags); void io_req_task_queue(struct io_kiocb *req); void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw); void io_req_task_queue_fail(struct io_kiocb *req, int ret); void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw); struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries); struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count); void tctx_task_work(struct callback_head *cb); __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd); int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file, int start, int end); void io_req_queue_iowq(struct io_kiocb *req); int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw); int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr); int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin); void __io_submit_flush_completions(struct io_ring_ctx *ctx); struct io_wq_work *io_wq_free_work(struct io_wq_work *work); void io_wq_submit_work(struct io_wq_work *work); void io_free_req(struct io_kiocb *req); void io_queue_next(struct io_kiocb *req); void io_task_refs_refill(struct io_uring_task *tctx); bool __io_alloc_req_refill(struct io_ring_ctx *ctx); bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx, bool cancel_all); void io_activate_pollwq(struct io_ring_ctx *ctx); static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx) { #if defined(CONFIG_PROVE_LOCKING) lockdep_assert(in_task()); if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) lockdep_assert_held(&ctx->uring_lock); if 
(ctx->flags & IORING_SETUP_IOPOLL) { lockdep_assert_held(&ctx->uring_lock); } else if (!ctx->task_complete) { lockdep_assert_held(&ctx->completion_lock); } else if (ctx->submitter_task) { /* * ->submitter_task may be NULL and we can still post a CQE, * if the ring has been setup with IORING_SETUP_R_DISABLED. * Not from an SQE, as those cannot be submitted, but via * updating tagged resources. */ if (!percpu_ref_is_dying(&ctx->refs)) lockdep_assert(current == ctx->submitter_task); } #endif } static inline bool io_is_compat(struct io_ring_ctx *ctx) { return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat); } static inline void io_req_task_work_add(struct io_kiocb *req) { __io_req_task_work_add(req, 0); } static inline void io_submit_flush_completions(struct io_ring_ctx *ctx) { if (!wq_list_empty(&ctx->submit_state.compl_reqs) || ctx->submit_state.cq_flush) __io_submit_flush_completions(ctx); } #define io_for_each_link(pos, head) \ for (pos = (head); pos; pos = pos->link) static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx, struct io_uring_cqe **ret, bool overflow, bool cqe32) { io_lockdep_assert_cq_locked(ctx); if (unlikely(ctx->cqe_sentinel - ctx->cqe_cached < (cqe32 + 1))) { if (unlikely(!io_cqe_cache_refill(ctx, overflow, cqe32))) return false; } *ret = ctx->cqe_cached; ctx->cached_cq_tail++; ctx->cqe_cached++; if (ctx->flags & IORING_SETUP_CQE32) { ctx->cqe_cached++; } else if (cqe32 && ctx->flags & IORING_SETUP_CQE_MIXED) { ctx->cqe_cached++; ctx->cached_cq_tail++; } WARN_ON_ONCE(ctx->cqe_cached > ctx->cqe_sentinel); return true; } static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret, bool cqe32) { return io_get_cqe_overflow(ctx, ret, false, cqe32); } static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **cqe_ret) { io_lockdep_assert_cq_locked(ctx); ctx->submit_state.cq_flush = true; return io_get_cqe(ctx, cqe_ret, ctx->flags & IORING_SETUP_CQE_MIXED); } static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx, struct io_kiocb *req) { bool is_cqe32 = req->cqe.flags & IORING_CQE_F_32; struct io_uring_cqe *cqe; /* * If we can't get a cq entry, userspace overflowed the submission * (by quite a lot). 
*/ if (unlikely(!io_get_cqe(ctx, &cqe, is_cqe32))) return false; memcpy(cqe, &req->cqe, sizeof(*cqe)); if (is_cqe32) { memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe)); memset(&req->big_cqe, 0, sizeof(req->big_cqe)); } if (trace_io_uring_complete_enabled()) trace_io_uring_complete(req->ctx, req, cqe); return true; } static inline void req_set_fail(struct io_kiocb *req) { req->flags |= REQ_F_FAIL; if (req->flags & REQ_F_CQE_SKIP) { req->flags &= ~REQ_F_CQE_SKIP; req->flags |= REQ_F_SKIP_LINK_CQES; } } static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags) { req->cqe.res = res; req->cqe.flags = cflags; } static inline u32 ctx_cqe32_flags(struct io_ring_ctx *ctx) { if (ctx->flags & IORING_SETUP_CQE_MIXED) return IORING_CQE_F_32; return 0; } static inline void io_req_set_res32(struct io_kiocb *req, s32 res, u32 cflags, __u64 extra1, __u64 extra2) { req->cqe.res = res; req->cqe.flags = cflags | ctx_cqe32_flags(req->ctx); req->big_cqe.extra1 = extra1; req->big_cqe.extra2 = extra2; } static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache, struct io_kiocb *req) { if (cache) { req->async_data = io_cache_alloc(cache, GFP_KERNEL); } else { const struct io_issue_def *def = &io_issue_defs[req->opcode]; WARN_ON_ONCE(!def->async_size); req->async_data = kmalloc(def->async_size, GFP_KERNEL); } if (req->async_data) req->flags |= REQ_F_ASYNC_DATA; return req->async_data; } static inline bool req_has_async_data(struct io_kiocb *req) { return req->flags & REQ_F_ASYNC_DATA; } static inline void io_req_async_data_clear(struct io_kiocb *req, io_req_flags_t extra_flags) { req->flags &= ~(REQ_F_ASYNC_DATA|extra_flags); req->async_data = NULL; } static inline void io_req_async_data_free(struct io_kiocb *req) { kfree(req->async_data); io_req_async_data_clear(req, 0); } static inline void io_put_file(struct io_kiocb *req) { if (!(req->flags & REQ_F_FIXED_FILE) && req->file) fput(req->file); } static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx, unsigned issue_flags) { lockdep_assert_held(&ctx->uring_lock); if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) mutex_unlock(&ctx->uring_lock); } static inline void io_ring_submit_lock(struct io_ring_ctx *ctx, unsigned issue_flags) { /* * "Normal" inline submissions always hold the uring_lock, since we * grab it from the system call. Same is true for the SQPOLL offload. * The only exception is when we've detached the request and issue it * from an async worker thread, grab the lock for that case. */ if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) mutex_lock(&ctx->uring_lock); lockdep_assert_held(&ctx->uring_lock); } static inline void io_commit_cqring(struct io_ring_ctx *ctx) { /* order cqe stores with ring update */ smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail); } static inline void __io_wq_wake(struct wait_queue_head *wq) { /* * * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter * set in the mask so that if we recurse back into our own poll * waitqueue handlers, we know we have a dependency between eventfd or * epoll and should terminate multishot poll at that point. */ if (wq_has_sleeper(wq)) __wake_up(wq, TASK_NORMAL, 0, poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); } static inline void io_poll_wq_wake(struct io_ring_ctx *ctx) { __io_wq_wake(&ctx->poll_wq); } static inline void io_cqring_wake(struct io_ring_ctx *ctx) { /* * Trigger waitqueue handler on all waiters on our waitqueue. This * won't necessarily wake up all the tasks, io_should_wake() will make * that decision. 
*/ __io_wq_wake(&ctx->cq_wait); } static inline bool io_sqring_full(struct io_ring_ctx *ctx) { struct io_rings *r = ctx->rings; /* * SQPOLL must use the actual sqring head, as using the cached_sq_head * is race prone if the SQPOLL thread has grabbed entries but not yet * committed them to the ring. For !SQPOLL, this doesn't matter, but * since this helper is just used for SQPOLL sqring waits (or POLLOUT), * just read the actual sqring head unconditionally. */ return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries; } static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) { struct io_rings *rings = ctx->rings; unsigned int entries; /* make sure SQ entry isn't read before tail */ entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; return min(entries, ctx->sq_entries); } static inline int io_run_task_work(void) { bool ret = false; /* * Always check-and-clear the task_work notification signal. With how * signaling works for task_work, we can find it set with nothing to * run. We need to clear it for that case, like get_signal() does. */ if (test_thread_flag(TIF_NOTIFY_SIGNAL)) clear_notify_signal(); /* * PF_IO_WORKER never returns to userspace, so check here if we have * notify work that needs processing. */ if (current->flags & PF_IO_WORKER) { if (test_thread_flag(TIF_NOTIFY_RESUME)) { __set_current_state(TASK_RUNNING); resume_user_mode_work(NULL); } if (current->io_uring) { unsigned int count = 0; __set_current_state(TASK_RUNNING); tctx_task_work_run(current->io_uring, UINT_MAX, &count); if (count) ret = true; } } if (task_work_pending(current)) { __set_current_state(TASK_RUNNING); task_work_run(); ret = true; } return ret; } static inline bool io_local_work_pending(struct io_ring_ctx *ctx) { return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist); } static inline bool io_task_work_pending(struct io_ring_ctx *ctx) { return task_work_pending(current) || io_local_work_pending(ctx); } static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw) { lockdep_assert_held(&ctx->uring_lock); } /* * Don't complete immediately but use deferred completion infrastructure. * Protected by ->uring_lock and can only be used either with * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex. 
*/ static inline void io_req_complete_defer(struct io_kiocb *req) __must_hold(&req->ctx->uring_lock) { struct io_submit_state *state = &req->ctx->submit_state; lockdep_assert_held(&req->ctx->uring_lock); wq_list_add_tail(&req->comp_list, &state->compl_reqs); } static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx) { if (unlikely(ctx->off_timeout_used || ctx->has_evfd || ctx->poll_activated)) __io_commit_cqring_flush(ctx); } static inline void io_get_task_refs(int nr) { struct io_uring_task *tctx = current->io_uring; tctx->cached_refs -= nr; if (unlikely(tctx->cached_refs < 0)) io_task_refs_refill(tctx); } static inline bool io_req_cache_empty(struct io_ring_ctx *ctx) { return !ctx->submit_state.free_list.next; } extern struct kmem_cache *req_cachep; static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx) { struct io_kiocb *req; req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list); wq_stack_extract(&ctx->submit_state.free_list); return req; } static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req) { if (unlikely(io_req_cache_empty(ctx))) { if (!__io_alloc_req_refill(ctx)) return false; } *req = io_extract_req(ctx); return true; } static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx) { return likely(ctx->submitter_task == current); } static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx) { return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) || ctx->submitter_task == current); } /* * Terminate the request if either of these conditions are true: * * 1) It's being executed by the original task, but that task is marked * with PF_EXITING as it's exiting. * 2) PF_KTHREAD is set, in which case the invoker of the task_work is * our fallback task_work. */ static inline bool io_should_terminate_tw(void) { return current->flags & (PF_KTHREAD | PF_EXITING); } static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res) { io_req_set_res(req, res, 0); req->io_task_work.func = io_req_task_complete; io_req_task_work_add(req); } /* * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each * slot. */ static inline size_t uring_sqe_size(struct io_ring_ctx *ctx) { if (ctx->flags & IORING_SETUP_SQE128) return 2 * sizeof(struct io_uring_sqe); return sizeof(struct io_uring_sqe); } static inline bool io_file_can_poll(struct io_kiocb *req) { if (req->flags & REQ_F_CAN_POLL) return true; if (req->file && file_can_poll(req->file)) { req->flags |= REQ_F_CAN_POLL; return true; } return false; } static inline ktime_t io_get_time(struct io_ring_ctx *ctx) { if (ctx->clockid == CLOCK_MONOTONIC) return ktime_get(); return ktime_get_with_offset(ctx->clock_offset); } enum { IO_CHECK_CQ_OVERFLOW_BIT, IO_CHECK_CQ_DROPPED_BIT, }; static inline bool io_has_work(struct io_ring_ctx *ctx) { return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) || io_local_work_pending(ctx); } #endif |
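A hedged sketch of how the CQE helpers above fit together when posting a completion while the CQ lock is already held. The function name is hypothetical and overflow handling is omitted; every helper it calls is declared or defined in this header, and a plain (non-32-byte) CQE is assumed.

static bool example_post_cqe_locked(struct io_ring_ctx *ctx,
				    u64 user_data, s32 res, u32 cflags)
{
	struct io_uring_cqe *cqe;

	io_lockdep_assert_cq_locked(ctx);

	/* Reserve the next CQE slot from the cached range (may refill). */
	if (!io_get_cqe(ctx, &cqe, false))
		return false;	/* CQ ring is full; caller must handle overflow */

	WRITE_ONCE(cqe->user_data, user_data);
	WRITE_ONCE(cqe->res, res);
	WRITE_ONCE(cqe->flags, cflags);

	/* Publish the new tail, then wake anyone sleeping in cq_wait. */
	io_commit_cqring(ctx);
	io_cqring_wake(ctx);
	return true;
}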
/* SPDX-License-Identifier: GPL-2.0-only */ /* Driver for Realtek RTS5139 USB card reader * * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
* * Author: * Roger Tseng <rogerable@realtek.com> */ #ifndef __RTSX_USB_H #define __RTSX_USB_H #include <linux/usb.h> #define DRV_NAME_RTSX_USB "rtsx_usb" #define DRV_NAME_RTSX_USB_SDMMC "rtsx_usb_sdmmc" #define DRV_NAME_RTSX_USB_MS "rtsx_usb_ms" /* related module names */ #define RTSX_USB_SD_CARD 0 #define RTSX_USB_MS_CARD 1 /* endpoint numbers */ #define EP_BULK_OUT 1 #define EP_BULK_IN 2 #define EP_INTR_IN 3 /* USB vendor requests */ #define RTSX_USB_REQ_REG_OP 0x00 #define RTSX_USB_REQ_POLL 0x02 /* miscellaneous parameters */ #define MIN_DIV_N 60 #define MAX_DIV_N 120 #define MAX_PHASE 15 #define RX_TUNING_CNT 3 #define QFN24 0 #define LQFP48 1 #define CHECK_PKG(ucr, pkg) ((ucr)->package == (pkg)) /* data structures */ struct rtsx_ucr { u16 vendor_id; u16 product_id; int package; u8 ic_version; bool is_rts5179; unsigned int cur_clk; u8 *cmd_buf; unsigned int cmd_idx; u8 *rsp_buf; struct usb_device *pusb_dev; struct usb_interface *pusb_intf; struct usb_sg_request current_sg; struct timer_list sg_timer; struct mutex dev_mutex; }; /* buffer size */ #define IOBUF_SIZE 1024 /* prototypes of exported functions */ extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status); extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data); extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data); extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr, u8 mask, u8 data); extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout); extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout); extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe, void *buf, unsigned int len, int use_sg, unsigned int *act_len, int timeout); extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock, u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card); /* card status */ #define SD_CD 0x01 #define MS_CD 0x02 #define XD_CD 0x04 #define CD_MASK (SD_CD | MS_CD | XD_CD) #define SD_WP 0x08 /* OCPCTL */ #define MS_OCP_DETECT_EN 0x08 #define MS_OCP_INT_EN 0x04 #define MS_OCP_INT_CLR 0x02 #define MS_OCP_CLEAR 0x01 /* OCPSTAT */ #define MS_OCP_DETECT 0x80 #define MS_OCP_NOW 0x02 #define MS_OCP_EVER 0x01 /* reader command field offset & parameters */ #define READ_REG_CMD 0 #define WRITE_REG_CMD 1 #define CHECK_REG_CMD 2 #define PACKET_TYPE 4 #define CNT_H 5 #define CNT_L 6 #define STAGE_FLAG 7 #define CMD_OFFSET 8 #define SEQ_WRITE_DATA_OFFSET 12 #define BATCH_CMD 0 #define SEQ_READ 1 #define SEQ_WRITE 2 #define STAGE_R 0x01 #define STAGE_DI 0x02 #define STAGE_DO 0x04 #define STAGE_MS_STATUS 0x08 #define STAGE_XD_STATUS 0x10 #define MODE_C 0x00 #define MODE_CR (STAGE_R) #define MODE_CDIR (STAGE_R | STAGE_DI) #define MODE_CDOR (STAGE_R | STAGE_DO) #define EP0_OP_SHIFT 14 #define EP0_READ_REG_CMD 2 #define EP0_WRITE_REG_CMD 3 #define rtsx_usb_cmd_hdr_tag(ucr) \ do { \ ucr->cmd_buf[0] = 'R'; \ ucr->cmd_buf[1] = 'T'; \ ucr->cmd_buf[2] = 'C'; \ ucr->cmd_buf[3] = 'R'; \ } while (0) static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr) { rtsx_usb_cmd_hdr_tag(ucr); ucr->cmd_idx = 0; 
ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD; } /* internal register address */ #define FPDCTL 0xFC00 #define SSC_DIV_N_0 0xFC07 #define SSC_CTL1 0xFC09 #define SSC_CTL2 0xFC0A #define CFG_MODE 0xFC0E #define CFG_MODE_1 0xFC0F #define RCCTL 0xFC14 #define SOF_WDOG 0xFC28 #define SYS_DUMMY0 0xFC30 #define MS_BLKEND 0xFD30 #define MS_READ_START 0xFD31 #define MS_READ_COUNT 0xFD32 #define MS_WRITE_START 0xFD33 #define MS_WRITE_COUNT 0xFD34 #define MS_COMMAND 0xFD35 #define MS_OLD_BLOCK_0 0xFD36 #define MS_OLD_BLOCK_1 0xFD37 #define MS_NEW_BLOCK_0 0xFD38 #define MS_NEW_BLOCK_1 0xFD39 #define MS_LOG_BLOCK_0 0xFD3A #define MS_LOG_BLOCK_1 0xFD3B #define MS_BUS_WIDTH 0xFD3C #define MS_PAGE_START 0xFD3D #define MS_PAGE_LENGTH 0xFD3E #define MS_CFG 0xFD40 #define MS_TPC 0xFD41 #define MS_TRANS_CFG 0xFD42 #define MS_TRANSFER 0xFD43 #define MS_INT_REG 0xFD44 #define MS_BYTE_CNT 0xFD45 #define MS_SECTOR_CNT_L 0xFD46 #define MS_SECTOR_CNT_H 0xFD47 #define MS_DBUS_H 0xFD48 #define CARD_DMA1_CTL 0xFD5C #define CARD_PULL_CTL1 0xFD60 #define CARD_PULL_CTL2 0xFD61 #define CARD_PULL_CTL3 0xFD62 #define CARD_PULL_CTL4 0xFD63 #define CARD_PULL_CTL5 0xFD64 #define CARD_PULL_CTL6 0xFD65 #define CARD_EXIST 0xFD6F #define CARD_INT_PEND 0xFD71 #define LDO_POWER_CFG 0xFD7B #define SD_CFG1 0xFDA0 #define SD_CFG2 0xFDA1 #define SD_CFG3 0xFDA2 #define SD_STAT1 0xFDA3 #define SD_STAT2 0xFDA4 #define SD_BUS_STAT 0xFDA5 #define SD_PAD_CTL 0xFDA6 #define SD_SAMPLE_POINT_CTL 0xFDA7 #define SD_PUSH_POINT_CTL 0xFDA8 #define SD_CMD0 0xFDA9 #define SD_CMD1 0xFDAA #define SD_CMD2 0xFDAB #define SD_CMD3 0xFDAC #define SD_CMD4 0xFDAD #define SD_CMD5 0xFDAE #define SD_BYTE_CNT_L 0xFDAF #define SD_BYTE_CNT_H 0xFDB0 #define SD_BLOCK_CNT_L 0xFDB1 #define SD_BLOCK_CNT_H 0xFDB2 #define SD_TRANSFER 0xFDB3 #define SD_CMD_STATE 0xFDB5 #define SD_DATA_STATE 0xFDB6 #define SD_VPCLK0_CTL 0xFC2A #define SD_VPCLK1_CTL 0xFC2B #define SD_DCMPS0_CTL 0xFC2C #define SD_DCMPS1_CTL 0xFC2D #define CARD_DMA1_CTL 0xFD5C #define HW_VERSION 0xFC01 #define SSC_CLK_FPGA_SEL 0xFC02 #define CLK_DIV 0xFC03 #define SFSM_ED 0xFC04 #define CD_DEGLITCH_WIDTH 0xFC20 #define CD_DEGLITCH_EN 0xFC21 #define AUTO_DELINK_EN 0xFC23 #define FPGA_PULL_CTL 0xFC1D #define CARD_CLK_SOURCE 0xFC2E #define CARD_SHARE_MODE 0xFD51 #define CARD_DRIVE_SEL 0xFD52 #define CARD_STOP 0xFD53 #define CARD_OE 0xFD54 #define CARD_AUTO_BLINK 0xFD55 #define CARD_GPIO 0xFD56 #define SD30_DRIVE_SEL 0xFD57 #define CARD_DATA_SOURCE 0xFD5D #define CARD_SELECT 0xFD5E #define CARD_CLK_EN 0xFD79 #define CARD_PWR_CTL 0xFD7A #define OCPCTL 0xFD80 #define OCPPARA1 0xFD81 #define OCPPARA2 0xFD82 #define OCPSTAT 0xFD83 #define HS_USB_STAT 0xFE01 #define HS_VCONTROL 0xFE26 #define HS_VSTAIN 0xFE27 #define HS_VLOADM 0xFE28 #define HS_VSTAOUT 0xFE29 #define MC_IRQ 0xFF00 #define MC_IRQEN 0xFF01 #define MC_FIFO_CTL 0xFF02 #define MC_FIFO_BC0 0xFF03 #define MC_FIFO_BC1 0xFF04 #define MC_FIFO_STAT 0xFF05 #define MC_FIFO_MODE 0xFF06 #define MC_FIFO_RD_PTR0 0xFF07 #define MC_FIFO_RD_PTR1 0xFF08 #define MC_DMA_CTL 0xFF10 #define MC_DMA_TC0 0xFF11 #define MC_DMA_TC1 0xFF12 #define MC_DMA_TC2 0xFF13 #define MC_DMA_TC3 0xFF14 #define MC_DMA_RST 0xFF15 #define RBUF_SIZE_MASK 0xFBFF #define RBUF_BASE 0xF000 #define PPBUF_BASE1 0xF800 #define PPBUF_BASE2 0xFA00 /* internal register value macros */ #define POWER_OFF 0x03 #define PARTIAL_POWER_ON 0x02 #define POWER_ON 0x00 #define POWER_MASK 0x03 #define LDO3318_PWR_MASK 0x0C #define LDO_ON 0x00 #define LDO_SUSPEND 0x08 #define LDO_OFF 0x0C #define DV3318_AUTO_PWR_OFF 0x10 #define 
FORCE_LDO_POWERB 0x60 /* LDO_POWER_CFG */ #define TUNE_SD18_MASK 0x1C #define TUNE_SD18_1V7 0x00 #define TUNE_SD18_1V8 (0x01 << 2) #define TUNE_SD18_1V9 (0x02 << 2) #define TUNE_SD18_2V0 (0x03 << 2) #define TUNE_SD18_2V7 (0x04 << 2) #define TUNE_SD18_2V8 (0x05 << 2) #define TUNE_SD18_2V9 (0x06 << 2) #define TUNE_SD18_3V3 (0x07 << 2) /* CLK_DIV */ #define CLK_CHANGE 0x80 #define CLK_DIV_1 0x00 #define CLK_DIV_2 0x01 #define CLK_DIV_4 0x02 #define CLK_DIV_8 0x03 #define SSC_POWER_MASK 0x01 #define SSC_POWER_DOWN 0x01 #define SSC_POWER_ON 0x00 #define FPGA_VER 0x80 #define HW_VER_MASK 0x0F #define EXTEND_DMA1_ASYNC_SIGNAL 0x02 /* CFG_MODE*/ #define XTAL_FREE 0x80 #define CLK_MODE_MASK 0x03 #define CLK_MODE_12M_XTAL 0x00 #define CLK_MODE_NON_XTAL 0x01 #define CLK_MODE_24M_OSC 0x02 #define CLK_MODE_48M_OSC 0x03 /* CFG_MODE_1*/ #define RTS5179 0x02 #define NYET_EN 0x01 #define NYET_MSAK 0x01 #define SD30_DRIVE_MASK 0x07 #define SD20_DRIVE_MASK 0x03 #define DISABLE_SD_CD 0x08 #define DISABLE_MS_CD 0x10 #define DISABLE_XD_CD 0x20 #define SD_CD_DEGLITCH_EN 0x01 #define MS_CD_DEGLITCH_EN 0x02 #define XD_CD_DEGLITCH_EN 0x04 #define CARD_SHARE_LQFP48 0x04 #define CARD_SHARE_QFN24 0x00 #define CARD_SHARE_LQFP_SEL 0x04 #define CARD_SHARE_XD 0x00 #define CARD_SHARE_SD 0x01 #define CARD_SHARE_MS 0x02 #define CARD_SHARE_MASK 0x03 /* SD30_DRIVE_SEL */ #define DRIVER_TYPE_A 0x05 #define DRIVER_TYPE_B 0x03 #define DRIVER_TYPE_C 0x02 #define DRIVER_TYPE_D 0x01 /* SD_BUS_STAT */ #define SD_CLK_TOGGLE_EN 0x80 #define SD_CLK_FORCE_STOP 0x40 #define SD_DAT3_STATUS 0x10 #define SD_DAT2_STATUS 0x08 #define SD_DAT1_STATUS 0x04 #define SD_DAT0_STATUS 0x02 #define SD_CMD_STATUS 0x01 /* SD_PAD_CTL */ #define SD_IO_USING_1V8 0x80 #define SD_IO_USING_3V3 0x7F #define TYPE_A_DRIVING 0x00 #define TYPE_B_DRIVING 0x01 #define TYPE_C_DRIVING 0x02 #define TYPE_D_DRIVING 0x03 /* CARD_CLK_EN */ #define SD_CLK_EN 0x04 #define MS_CLK_EN 0x08 /* CARD_SELECT */ #define SD_MOD_SEL 2 #define MS_MOD_SEL 3 /* CARD_SHARE_MODE */ #define CARD_SHARE_LQFP48 0x04 #define CARD_SHARE_QFN24 0x00 #define CARD_SHARE_LQFP_SEL 0x04 #define CARD_SHARE_XD 0x00 #define CARD_SHARE_SD 0x01 #define CARD_SHARE_MS 0x02 #define CARD_SHARE_MASK 0x03 /* SSC_CTL1 */ #define SSC_RSTB 0x80 #define SSC_8X_EN 0x40 #define SSC_FIX_FRAC 0x20 #define SSC_SEL_1M 0x00 #define SSC_SEL_2M 0x08 #define SSC_SEL_4M 0x10 #define SSC_SEL_8M 0x18 /* SSC_CTL2 */ #define SSC_DEPTH_MASK 0x03 #define SSC_DEPTH_DISALBE 0x00 #define SSC_DEPTH_2M 0x01 #define SSC_DEPTH_1M 0x02 #define SSC_DEPTH_512K 0x03 /* SD_VPCLK0_CTL */ #define PHASE_CHANGE 0x80 #define PHASE_NOT_RESET 0x40 /* SD_TRANSFER */ #define SD_TRANSFER_START 0x80 #define SD_TRANSFER_END 0x40 #define SD_STAT_IDLE 0x20 #define SD_TRANSFER_ERR 0x10 #define SD_TM_NORMAL_WRITE 0x00 #define SD_TM_AUTO_WRITE_3 0x01 #define SD_TM_AUTO_WRITE_4 0x02 #define SD_TM_AUTO_READ_3 0x05 #define SD_TM_AUTO_READ_4 0x06 #define SD_TM_CMD_RSP 0x08 #define SD_TM_AUTO_WRITE_1 0x09 #define SD_TM_AUTO_WRITE_2 0x0A #define SD_TM_NORMAL_READ 0x0C #define SD_TM_AUTO_READ_1 0x0D #define SD_TM_AUTO_READ_2 0x0E #define SD_TM_AUTO_TUNING 0x0F /* SD_CFG1 */ #define SD_CLK_DIVIDE_0 0x00 #define SD_CLK_DIVIDE_256 0xC0 #define SD_CLK_DIVIDE_128 0x80 #define SD_CLK_DIVIDE_MASK 0xC0 #define SD_BUS_WIDTH_1BIT 0x00 #define SD_BUS_WIDTH_4BIT 0x01 #define SD_BUS_WIDTH_8BIT 0x02 #define SD_ASYNC_FIFO_RST 0x10 #define SD_20_MODE 0x00 #define SD_DDR_MODE 0x04 #define SD_30_MODE 0x08 /* SD_CFG2 */ #define SD_CALCULATE_CRC7 0x00 #define SD_NO_CALCULATE_CRC7 0x80 
#define SD_CHECK_CRC16 0x00 #define SD_NO_CHECK_CRC16 0x40 #define SD_WAIT_CRC_TO_EN 0x20 #define SD_WAIT_BUSY_END 0x08 #define SD_NO_WAIT_BUSY_END 0x00 #define SD_CHECK_CRC7 0x00 #define SD_NO_CHECK_CRC7 0x04 #define SD_RSP_LEN_0 0x00 #define SD_RSP_LEN_6 0x01 #define SD_RSP_LEN_17 0x02 #define SD_RSP_TYPE_R0 0x04 #define SD_RSP_TYPE_R1 0x01 #define SD_RSP_TYPE_R1b 0x09 #define SD_RSP_TYPE_R2 0x02 #define SD_RSP_TYPE_R3 0x05 #define SD_RSP_TYPE_R4 0x05 #define SD_RSP_TYPE_R5 0x01 #define SD_RSP_TYPE_R6 0x01 #define SD_RSP_TYPE_R7 0x01 /* SD_STAT1 */ #define SD_CRC7_ERR 0x80 #define SD_CRC16_ERR 0x40 #define SD_CRC_WRITE_ERR 0x20 #define SD_CRC_WRITE_ERR_MASK 0x1C #define GET_CRC_TIME_OUT 0x02 #define SD_TUNING_COMPARE_ERR 0x01 /* SD_DATA_STATE */ #define SD_DATA_IDLE 0x80 /* CARD_DATA_SOURCE */ #define PINGPONG_BUFFER 0x01 #define RING_BUFFER 0x00 /* CARD_OE */ #define SD_OUTPUT_EN 0x04 #define MS_OUTPUT_EN 0x08 /* CARD_STOP */ #define SD_STOP 0x04 #define MS_STOP 0x08 #define SD_CLR_ERR 0x40 #define MS_CLR_ERR 0x80 /* CARD_CLK_SOURCE */ #define CRC_FIX_CLK (0x00 << 0) #define CRC_VAR_CLK0 (0x01 << 0) #define CRC_VAR_CLK1 (0x02 << 0) #define SD30_FIX_CLK (0x00 << 2) #define SD30_VAR_CLK0 (0x01 << 2) #define SD30_VAR_CLK1 (0x02 << 2) #define SAMPLE_FIX_CLK (0x00 << 4) #define SAMPLE_VAR_CLK0 (0x01 << 4) #define SAMPLE_VAR_CLK1 (0x02 << 4) /* SD_SAMPLE_POINT_CTL */ #define DDR_FIX_RX_DAT 0x00 #define DDR_VAR_RX_DAT 0x80 #define DDR_FIX_RX_DAT_EDGE 0x00 #define DDR_FIX_RX_DAT_14_DELAY 0x40 #define DDR_FIX_RX_CMD 0x00 #define DDR_VAR_RX_CMD 0x20 #define DDR_FIX_RX_CMD_POS_EDGE 0x00 #define DDR_FIX_RX_CMD_14_DELAY 0x10 #define SD20_RX_POS_EDGE 0x00 #define SD20_RX_14_DELAY 0x08 #define SD20_RX_SEL_MASK 0x08 /* SD_PUSH_POINT_CTL */ #define DDR_FIX_TX_CMD_DAT 0x00 #define DDR_VAR_TX_CMD_DAT 0x80 #define DDR_FIX_TX_DAT_14_TSU 0x00 #define DDR_FIX_TX_DAT_12_TSU 0x40 #define DDR_FIX_TX_CMD_NEG_EDGE 0x00 #define DDR_FIX_TX_CMD_14_AHEAD 0x20 #define SD20_TX_NEG_EDGE 0x00 #define SD20_TX_14_AHEAD 0x10 #define SD20_TX_SEL_MASK 0x10 #define DDR_VAR_SDCLK_POL_SWAP 0x01 /* MS_CFG */ #define SAMPLE_TIME_RISING 0x00 #define SAMPLE_TIME_FALLING 0x80 #define PUSH_TIME_DEFAULT 0x00 #define PUSH_TIME_ODD 0x40 #define NO_EXTEND_TOGGLE 0x00 #define EXTEND_TOGGLE_CHK 0x20 #define MS_BUS_WIDTH_1 0x00 #define MS_BUS_WIDTH_4 0x10 #define MS_BUS_WIDTH_8 0x18 #define MS_2K_SECTOR_MODE 0x04 #define MS_512_SECTOR_MODE 0x00 #define MS_TOGGLE_TIMEOUT_EN 0x00 #define MS_TOGGLE_TIMEOUT_DISEN 0x01 #define MS_NO_CHECK_INT 0x02 /* MS_TRANS_CFG */ #define WAIT_INT 0x80 #define NO_WAIT_INT 0x00 #define NO_AUTO_READ_INT_REG 0x00 #define AUTO_READ_INT_REG 0x40 #define MS_CRC16_ERR 0x20 #define MS_RDY_TIMEOUT 0x10 #define MS_INT_CMDNK 0x08 #define MS_INT_BREQ 0x04 #define MS_INT_ERR 0x02 #define MS_INT_CED 0x01 /* MS_TRANSFER */ #define MS_TRANSFER_START 0x80 #define MS_TRANSFER_END 0x40 #define MS_TRANSFER_ERR 0x20 #define MS_BS_STATE 0x10 #define MS_TM_READ_BYTES 0x00 #define MS_TM_NORMAL_READ 0x01 #define MS_TM_WRITE_BYTES 0x04 #define MS_TM_NORMAL_WRITE 0x05 #define MS_TM_AUTO_READ 0x08 #define MS_TM_AUTO_WRITE 0x0C #define MS_TM_SET_CMD 0x06 #define MS_TM_COPY_PAGE 0x07 #define MS_TM_MULTI_READ 0x02 #define MS_TM_MULTI_WRITE 0x03 /* MC_FIFO_CTL */ #define FIFO_FLUSH 0x01 /* MC_DMA_RST */ #define DMA_RESET 0x01 /* MC_DMA_CTL */ #define DMA_TC_EQ_0 0x80 #define DMA_DIR_TO_CARD 0x00 #define DMA_DIR_FROM_CARD 0x02 #define DMA_EN 0x01 #define DMA_128 (0 << 2) #define DMA_256 (1 << 2) #define DMA_512 (2 << 2) #define DMA_1024 (3 << 
2) #define DMA_PACK_SIZE_MASK 0x0C /* CARD_INT_PEND */ #define XD_INT 0x10 #define MS_INT 0x08 #define SD_INT 0x04 /* LED operations */ static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr) { return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02); } static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr) { return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03); } /* HW error clearing */ static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr) { rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8); } static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr) { rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH); rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET); } #endif /* __RTSX_USB_H */
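The exported command interface above is meant to be used as a batch: tag the command buffer, queue register operations, fire the transfer, then collect the response. A usage sketch under the assumption that a function driver wants to read two card-control registers in one round trip; the register choices, the 100 ms timeouts, and reading the results back from ucr->rsp_buf are illustrative assumptions, while the call sequence follows the declarations in this header.

static int example_read_card_regs(struct rtsx_ucr *ucr, u8 *oe, u8 *stop)
{
	int ret;

	rtsx_usb_init_cmd(ucr);
	rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_OE, 0, 0);
	rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_STOP, 0, 0);

	ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
	if (ret)
		return ret;

	/* Assumes one response byte per queued READ_REG_CMD. */
	ret = rtsx_usb_get_rsp(ucr, 2, 100);
	if (ret)
		return ret;

	*oe = ucr->rsp_buf[0];
	*stop = ucr->rsp_buf[1];
	return 0;
}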
// SPDX-License-Identifier: GPL-2.0-only /* * vivid-vbi-cap.c - vbi capture support functions. * * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include "vivid-core.h" #include "vivid-kthread-cap.h" #include "vivid-vbi-cap.h" #include "vivid-vbi-gen.h" #include "vivid-vid-common.h" static void vivid_sliced_vbi_cap_fill(struct vivid_dev *dev, unsigned seqnr) { struct vivid_vbi_gen_data *vbi_gen = &dev->vbi_gen; bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60; vivid_vbi_gen_sliced(vbi_gen, is_60hz, seqnr); if (!is_60hz) { if (vivid_vid_can_loop(dev)) { if (dev->vbi_out_have_wss) { vbi_gen->data[12].data[0] = dev->vbi_out_wss[0]; vbi_gen->data[12].data[1] = dev->vbi_out_wss[1]; } else { vbi_gen->data[12].id = 0; } } else { switch (tpg_g_video_aspect(&dev->tpg)) { case TPG_VIDEO_ASPECT_14X9_CENTRE: vbi_gen->data[12].data[0] = 0x01; break; case TPG_VIDEO_ASPECT_16X9_CENTRE: vbi_gen->data[12].data[0] = 0x0b; break; case TPG_VIDEO_ASPECT_16X9_ANAMORPHIC: vbi_gen->data[12].data[0] = 0x07; break; case TPG_VIDEO_ASPECT_4X3: default: vbi_gen->data[12].data[0] = 0x08; break; } } } else if (vivid_vid_can_loop(dev) && is_60hz) { if (dev->vbi_out_have_cc[0]) { vbi_gen->data[0].data[0] = dev->vbi_out_cc[0][0]; vbi_gen->data[0].data[1] = dev->vbi_out_cc[0][1]; } else { vbi_gen->data[0].id = 0; } if (dev->vbi_out_have_cc[1]) { vbi_gen->data[1].data[0] = dev->vbi_out_cc[1][0]; vbi_gen->data[1].data[1] = dev->vbi_out_cc[1][1]; } else { vbi_gen->data[1].id = 0; } } } static void vivid_g_fmt_vbi_cap(struct vivid_dev *dev, struct v4l2_vbi_format *vbi) { bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60; vbi->sampling_rate = 27000000; vbi->offset = 24; vbi->samples_per_line = 1440; vbi->sample_format = V4L2_PIX_FMT_GREY; vbi->start[0] = is_60hz ? V4L2_VBI_ITU_525_F1_START + 9 : V4L2_VBI_ITU_625_F1_START + 5; vbi->start[1] = is_60hz ? V4L2_VBI_ITU_525_F2_START + 9 : V4L2_VBI_ITU_625_F2_START + 5; vbi->count[0] = vbi->count[1] = is_60hz ?
12 : 18; vbi->flags = dev->vbi_cap_interlaced ? V4L2_VBI_INTERLACED : 0; vbi->reserved[0] = 0; vbi->reserved[1] = 0; } void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf) { struct v4l2_vbi_format vbi; u8 *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0); vivid_g_fmt_vbi_cap(dev, &vbi); buf->vb.sequence = dev->vbi_cap_seq_count; if (dev->field_cap == V4L2_FIELD_ALTERNATE) buf->vb.sequence /= 2; vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence); memset(vbuf, 0x10, vb2_plane_size(&buf->vb.vb2_buf, 0)); if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf); } void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf) { struct v4l2_sliced_vbi_data *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0); buf->vb.sequence = dev->vbi_cap_seq_count; if (dev->field_cap == V4L2_FIELD_ALTERNATE) buf->vb.sequence /= 2; vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence); memset(vbuf, 0, vb2_plane_size(&buf->vb.vb2_buf, 0)); if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) { unsigned i; for (i = 0; i < 25; i++) vbuf[i] = dev->vbi_gen.data[i]; } } static int vbi_cap_queue_setup(struct vb2_queue *vq, unsigned *nbuffers, unsigned *nplanes, unsigned sizes[], struct device *alloc_devs[]) { struct vivid_dev *dev = vb2_get_drv_priv(vq); bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60; unsigned size = vq->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ? 36 * sizeof(struct v4l2_sliced_vbi_data) : 1440 * 2 * (is_60hz ? 12 : 18); if (!vivid_is_sdtv_cap(dev)) return -EINVAL; if (*nplanes) return sizes[0] < size ? -EINVAL : 0; sizes[0] = size; *nplanes = 1; return 0; } static int vbi_cap_buf_prepare(struct vb2_buffer *vb) { struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60; unsigned size = vb->vb2_queue->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ? 36 * sizeof(struct v4l2_sliced_vbi_data) : 1440 * 2 * (is_60hz ? 12 : 18); dprintk(dev, 1, "%s\n", __func__); if (dev->buf_prepare_error) { /* * Error injection: test what happens if buf_prepare() returns * an error. 
*/ dev->buf_prepare_error = false; return -EINVAL; } if (vb2_plane_size(vb, 0) < size) { dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n", __func__, vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); return 0; } static void vbi_cap_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb); dprintk(dev, 1, "%s\n", __func__); spin_lock(&dev->slock); list_add_tail(&buf->list, &dev->vbi_cap_active); spin_unlock(&dev->slock); } static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count) { struct vivid_dev *dev = vb2_get_drv_priv(vq); int err; dprintk(dev, 1, "%s\n", __func__); dev->vbi_cap_seq_count = 0; if (dev->start_streaming_error) { dev->start_streaming_error = false; err = -EINVAL; } else { err = vivid_start_generating_vid_cap(dev, &dev->vbi_cap_streaming); } if (err) { struct vivid_buffer *buf, *tmp; list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } } return err; } /* abort streaming and wait for last buffer */ static void vbi_cap_stop_streaming(struct vb2_queue *vq) { struct vivid_dev *dev = vb2_get_drv_priv(vq); dprintk(dev, 1, "%s\n", __func__); vivid_stop_generating_vid_cap(dev, &dev->vbi_cap_streaming); } static void vbi_cap_buf_request_complete(struct vb2_buffer *vb) { struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_cap); } const struct vb2_ops vivid_vbi_cap_qops = { .queue_setup = vbi_cap_queue_setup, .buf_prepare = vbi_cap_buf_prepare, .buf_queue = vbi_cap_buf_queue, .start_streaming = vbi_cap_start_streaming, .stop_streaming = vbi_cap_stop_streaming, .buf_request_complete = vbi_cap_buf_request_complete, }; int vidioc_g_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_vbi_format *vbi = &f->fmt.vbi; if (!vivid_is_sdtv_cap(dev) || !dev->has_raw_vbi_cap) return -EINVAL; vivid_g_fmt_vbi_cap(dev, vbi); return 0; } int vidioc_s_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); int ret = vidioc_g_fmt_vbi_cap(file, priv, f); if (ret) return ret; if (f->type != V4L2_BUF_TYPE_VBI_CAPTURE && vb2_is_busy(&dev->vb_vbi_cap_q)) return -EBUSY; return 0; } void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_set) { vbi->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; vbi->service_set = service_set; memset(vbi->service_lines, 0, sizeof(vbi->service_lines)); memset(vbi->reserved, 0, sizeof(vbi->reserved)); if (vbi->service_set == 0) return; if (vbi->service_set & V4L2_SLICED_CAPTION_525) { vbi->service_lines[0][21] = V4L2_SLICED_CAPTION_525; vbi->service_lines[1][21] = V4L2_SLICED_CAPTION_525; } if (vbi->service_set & V4L2_SLICED_WSS_625) { unsigned i; for (i = 7; i <= 18; i++) vbi->service_lines[0][i] = vbi->service_lines[1][i] = V4L2_SLICED_TELETEXT_B; vbi->service_lines[0][23] = V4L2_SLICED_WSS_625; } } int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced; if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap) return -EINVAL; vivid_fill_service_lines(vbi, dev->service_set_cap); return 0; } int 
vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced; bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60; u32 service_set = vbi->service_set; if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap) return -EINVAL; service_set &= is_60hz ? V4L2_SLICED_CAPTION_525 : V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B; vivid_fill_service_lines(vbi, service_set); return 0; } int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced; int ret = vidioc_try_fmt_sliced_vbi_cap(file, priv, fmt); if (ret) return ret; if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE && vb2_is_busy(&dev->vb_vbi_cap_q)) return -EBUSY; dev->service_set_cap = vbi->service_set; return 0; } int vidioc_g_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_sliced_vbi_cap *cap) { struct vivid_dev *dev = video_drvdata(file); struct video_device *vdev = video_devdata(file); bool is_60hz; if (vdev->vfl_dir == VFL_DIR_RX) { is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60; if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap || cap->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) return -EINVAL; } else { is_60hz = dev->std_out & V4L2_STD_525_60; if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out || cap->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) return -EINVAL; } cap->service_set = is_60hz ? V4L2_SLICED_CAPTION_525 : V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B; if (is_60hz) { cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525; cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525; } else { unsigned i; for (i = 7; i <= 18; i++) cap->service_lines[0][i] = cap->service_lines[1][i] = V4L2_SLICED_TELETEXT_B; cap->service_lines[0][23] = V4L2_SLICED_WSS_625; } return 0; } |
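From userspace, the sliced VBI ioctls above are exercised with the standard V4L2 format negotiation: request a service set and let the driver mask it down to what the current TV standard supports, exactly as vidioc_try/s_fmt_sliced_vbi_cap do here. A small sketch, assuming the vivid VBI capture node is at /dev/vbi0 (the device path is an assumption).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/vbi0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
	fmt.fmt.sliced.service_set = V4L2_SLICED_WSS_625 |
				     V4L2_SLICED_TELETEXT_B;

	/* The driver rewrites service_set and service_lines to what it grants. */
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) == 0)
		printf("granted services 0x%x, io_size %u\n",
		       (unsigned int)fmt.fmt.sliced.service_set,
		       (unsigned int)fmt.fmt.sliced.io_size);

	close(fd);
	return 0;
}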
/* SPDX-License-Identifier: GPL-2.0 */ /* *
Copyright (C) 2007 Oracle. All rights reserved. */ #ifndef BTRFS_VOLUMES_H #define BTRFS_VOLUMES_H #include <linux/blk_types.h> #include <linux/blkdev.h> #include <linux/sizes.h> #include <linux/atomic.h> #include <linux/sort.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/log2.h> #include <linux/kobject.h> #include <linux/refcount.h> #include <linux/completion.h> #include <linux/rbtree.h> #include <uapi/linux/btrfs.h> #include <uapi/linux/btrfs_tree.h> #include "messages.h" #include "extent-io-tree.h" struct block_device; struct bdev_handle; struct btrfs_fs_info; struct btrfs_block_group; struct btrfs_trans_handle; struct btrfs_transaction; struct btrfs_zoned_device_info; #define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G) /* * Arbitrary maximum size of one discard request to limit potentially long time * spent in blkdev_issue_discard(). */ #define BTRFS_MAX_DISCARD_CHUNK_SIZE (SZ_1G) extern struct mutex uuid_mutex; #define BTRFS_STRIPE_LEN SZ_64K #define BTRFS_STRIPE_LEN_SHIFT (16) #define BTRFS_STRIPE_LEN_MASK (BTRFS_STRIPE_LEN - 1) static_assert(const_ilog2(BTRFS_STRIPE_LEN) == BTRFS_STRIPE_LEN_SHIFT); /* Used by sanity check for btrfs_raid_types. */ #define const_ffs(n) (__builtin_ctzll(n) + 1) /* * The conversion from BTRFS_BLOCK_GROUP_* bits to btrfs_raid_type requires * RAID0 always to be the lowest profile bit. * Although it's part of on-disk format and should never change, do extra * compile-time sanity checks. */ static_assert(const_ffs(BTRFS_BLOCK_GROUP_RAID0) < const_ffs(BTRFS_BLOCK_GROUP_PROFILE_MASK & ~BTRFS_BLOCK_GROUP_RAID0)); static_assert(const_ilog2(BTRFS_BLOCK_GROUP_RAID0) > ilog2(BTRFS_BLOCK_GROUP_TYPE_MASK)); /* ilog2() can handle both constants and variables */ #define BTRFS_BG_FLAG_TO_INDEX(profile) \ ilog2((profile) >> (ilog2(BTRFS_BLOCK_GROUP_RAID0) - 1)) enum btrfs_raid_types { /* SINGLE is the special one as it doesn't have on-disk bit. */ BTRFS_RAID_SINGLE = 0, BTRFS_RAID_RAID0 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID0), BTRFS_RAID_RAID1 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1), BTRFS_RAID_DUP = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_DUP), BTRFS_RAID_RAID10 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID10), BTRFS_RAID_RAID5 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID5), BTRFS_RAID_RAID6 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID6), BTRFS_RAID_RAID1C3 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C3), BTRFS_RAID_RAID1C4 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C4), BTRFS_NR_RAID_TYPES }; /* * Use sequence counter to get consistent device stat data on * 32-bit processors. */ #if BITS_PER_LONG==32 && defined(CONFIG_SMP) #include <linux/seqlock.h> #define __BTRFS_NEED_DEVICE_DATA_ORDERED #define btrfs_device_data_ordered_init(device) \ seqcount_init(&device->data_seqcount) #else #define btrfs_device_data_ordered_init(device) do { } while (0) #endif #define BTRFS_DEV_STATE_WRITEABLE (0) #define BTRFS_DEV_STATE_IN_FS_METADATA (1) #define BTRFS_DEV_STATE_MISSING (2) #define BTRFS_DEV_STATE_REPLACE_TGT (3) #define BTRFS_DEV_STATE_FLUSH_SENT (4) #define BTRFS_DEV_STATE_NO_READA (5) /* Special value encoding failure to write primary super block. */ #define BTRFS_SUPER_PRIMARY_WRITE_ERROR (INT_MAX / 2) struct btrfs_fs_devices; struct btrfs_device { struct list_head dev_list; /* device_list_mutex */ struct list_head dev_alloc_list; /* chunk mutex */ struct list_head post_commit_list; /* chunk mutex */ struct btrfs_fs_devices *fs_devices; struct btrfs_fs_info *fs_info; /* Device path or NULL if missing. 
*/ const char __rcu *name; u64 generation; struct file *bdev_file; struct block_device *bdev; struct btrfs_zoned_device_info *zone_info; /* * Device's major-minor number. Must be set even if the device is not * opened (bdev == NULL), unless the device is missing. */ dev_t devt; unsigned long dev_state; blk_status_t last_flush_error; #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED seqcount_t data_seqcount; #endif /* the internal btrfs device id */ u64 devid; /* size of the device in memory */ u64 total_bytes; /* size of the device on disk */ u64 disk_total_bytes; /* bytes used */ u64 bytes_used; /* optimal io alignment for this device */ u32 io_align; /* optimal io width for this device */ u32 io_width; /* type and info about this device */ u64 type; /* * Counter of super block write errors, values larger than * BTRFS_SUPER_PRIMARY_WRITE_ERROR encode primary super block write failure. */ atomic_t sb_write_errors; /* minimal io size for this device */ u32 sector_size; /* physical drive uuid (or lvm uuid) */ u8 uuid[BTRFS_UUID_SIZE]; /* * size of the device on the current transaction * * This variant is update when committing the transaction, * and protected by chunk mutex */ u64 commit_total_bytes; /* bytes used on the current transaction */ u64 commit_bytes_used; /* Bio used for flushing device barriers */ struct bio flush_bio; struct completion flush_wait; /* per-device scrub information */ struct scrub_ctx *scrub_ctx; /* disk I/O failure stats. For detailed description refer to * enum btrfs_dev_stat_values in ioctl.h */ int dev_stats_valid; /* Counter to record the change of device stats */ atomic_t dev_stats_ccnt; atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX]; struct extent_io_tree alloc_state; struct completion kobj_unregister; /* For sysfs/FSID/devinfo/devid/ */ struct kobject devid_kobj; /* Bandwidth limit for scrub, in bytes */ u64 scrub_speed_max; }; /* * Block group or device which contains an active swapfile. Used for preventing * unsafe operations while a swapfile is active. * * These are sorted on (ptr, inode) (note that a block group or device can * contain more than one swapfile). We compare the pointer values because we * don't actually care what the object is, we just need a quick check whether * the object exists in the rbtree. */ struct btrfs_swapfile_pin { struct rb_node node; void *ptr; struct inode *inode; /* * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr * points to a struct btrfs_device. */ bool is_block_group; /* * Only used when 'is_block_group' is true and it is the number of * extents used by a swapfile for this block group ('ptr' field). */ int bg_extent_count; }; /* * If we read those variants at the context of their own lock, we needn't * use the following helpers, reading them directly is safe. 
*/ #if BITS_PER_LONG==32 && defined(CONFIG_SMP) #define BTRFS_DEVICE_GETSET_FUNCS(name) \ static inline u64 \ btrfs_device_get_##name(const struct btrfs_device *dev) \ { \ u64 size; \ unsigned int seq; \ \ do { \ seq = read_seqcount_begin(&dev->data_seqcount); \ size = dev->name; \ } while (read_seqcount_retry(&dev->data_seqcount, seq)); \ return size; \ } \ \ static inline void \ btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \ { \ preempt_disable(); \ write_seqcount_begin(&dev->data_seqcount); \ dev->name = size; \ write_seqcount_end(&dev->data_seqcount); \ preempt_enable(); \ } #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) #define BTRFS_DEVICE_GETSET_FUNCS(name) \ static inline u64 \ btrfs_device_get_##name(const struct btrfs_device *dev) \ { \ u64 size; \ \ preempt_disable(); \ size = dev->name; \ preempt_enable(); \ return size; \ } \ \ static inline void \ btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \ { \ preempt_disable(); \ dev->name = size; \ preempt_enable(); \ } #else #define BTRFS_DEVICE_GETSET_FUNCS(name) \ static inline u64 \ btrfs_device_get_##name(const struct btrfs_device *dev) \ { \ return dev->name; \ } \ \ static inline void \ btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \ { \ dev->name = size; \ } #endif BTRFS_DEVICE_GETSET_FUNCS(total_bytes); BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes); BTRFS_DEVICE_GETSET_FUNCS(bytes_used); enum btrfs_chunk_allocation_policy { BTRFS_CHUNK_ALLOC_REGULAR, BTRFS_CHUNK_ALLOC_ZONED, }; #define BTRFS_DEFAULT_RR_MIN_CONTIG_READ (SZ_256K) /* Keep in sync with raid_attr table, current maximum is RAID1C4. */ #define BTRFS_RAID1_MAX_MIRRORS (4) /* * Read policies for mirrored block group profiles, read picks the stripe based * on these policies. */ enum btrfs_read_policy { /* Use process PID to choose the stripe */ BTRFS_READ_POLICY_PID, #ifdef CONFIG_BTRFS_EXPERIMENTAL /* Balancing RAID1 reads across all striped devices (round-robin). */ BTRFS_READ_POLICY_RR, /* Read from a specific device. */ BTRFS_READ_POLICY_DEVID, #endif BTRFS_NR_READ_POLICY, }; #ifdef CONFIG_BTRFS_EXPERIMENTAL /* * Checksum mode - offload it to workqueues or do it synchronously in * btrfs_submit_chunk(). */ enum btrfs_offload_csum_mode { /* * Choose offloading checksum or do it synchronously automatically. * Do it synchronously if the checksum is fast, or offload to workqueues * otherwise. */ BTRFS_OFFLOAD_CSUM_AUTO, /* Always offload checksum to workqueues. */ BTRFS_OFFLOAD_CSUM_FORCE_ON, /* Never offload checksum to workqueues. */ BTRFS_OFFLOAD_CSUM_FORCE_OFF, }; #endif struct btrfs_fs_devices { u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */ /* * UUID written into the btree blocks: * * - If metadata_uuid != fsid then super block must have * BTRFS_FEATURE_INCOMPAT_METADATA_UUID flag set. * * - Following shall be true at all times: * - metadata_uuid == btrfs_header::fsid * - metadata_uuid == btrfs_dev_item::fsid * * - Relations between fsid and metadata_uuid in sb and fs_devices: * - Normal: * fs_devices->fsid == fs_devices->metadata_uuid == sb->fsid * sb->metadata_uuid == 0 * * - When the BTRFS_FEATURE_INCOMPAT_METADATA_UUID flag is set: * fs_devices->fsid == sb->fsid * fs_devices->metadata_uuid == sb->metadata_uuid * * - When in-memory fs_devices->temp_fsid is true * fs_devices->fsid = random * fs_devices->metadata_uuid == sb->fsid */ u8 metadata_uuid[BTRFS_FSID_SIZE]; struct list_head fs_list; /* * Number of devices under this fsid including missing and * replace-target device and excludes seed devices. 
*/ u64 num_devices; /* * The number of devices that successfully opened, including * replace-target, excludes seed devices. */ u64 open_devices; /* The number of devices that are under the chunk allocation list. */ u64 rw_devices; /* Count of missing devices under this fsid excluding seed device. */ u64 missing_devices; u64 total_rw_bytes; /* * Count of devices from btrfs_super_block::num_devices for this fsid, * which includes the seed device, excludes the transient replace-target * device. */ u64 total_devices; /* Highest generation number of seen devices */ u64 latest_generation; /* * The mount device or a device with highest generation after removal * or replace. */ struct btrfs_device *latest_dev; /* * All of the devices in the filesystem, protected by a mutex so we can * safely walk it to write out the super blocks without worrying about * adding/removing by the multi-device code. Scrubbing super block can * kick off supers writing by holding this mutex lock. */ struct mutex device_list_mutex; /* List of all devices, protected by device_list_mutex */ struct list_head devices; /* Devices which can satisfy space allocation. Protected by * chunk_mutex. */ struct list_head alloc_list; struct list_head seed_list; /* Count fs-devices opened. */ int opened; /* * Counter of the processes that are holding this fs_devices but not * yet opened. * This is for mounting handling, as we can only open the fs_devices * after a super block is created. But we cannot take uuid_mutex * during sget_fc(), thus we have to hold the fs_devices (meaning it * cannot be released) until a super block is returned. */ int holding; /* Set when we find or add a device that doesn't have the nonrot flag set. */ bool rotating; /* Devices support TRIM/discard commands. */ bool discardable; /* The filesystem is a seed filesystem. */ bool seeding; /* The mount needs to use a randomly generated fsid. */ bool temp_fsid; /* Enable/disable the filesystem stats tracking. */ bool collect_fs_stats; struct btrfs_fs_info *fs_info; /* sysfs kobjects */ struct kobject fsid_kobj; struct kobject *devices_kobj; struct kobject *devinfo_kobj; struct completion kobj_unregister; enum btrfs_chunk_allocation_policy chunk_alloc_policy; /* Policy used to read the mirrored stripes. */ enum btrfs_read_policy read_policy; #ifdef CONFIG_BTRFS_EXPERIMENTAL /* * Minimum contiguous reads before switching to next device, the unit * is one block/sectorsize. */ u32 rr_min_contig_read; /* Device to be used for reading in case of RAID1. */ u64 read_devid; /* Checksum mode - offload it or do it synchronously. */ enum btrfs_offload_csum_mode offload_csum_mode; #endif }; #define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \ - sizeof(struct btrfs_chunk)) \ / sizeof(struct btrfs_stripe) + 1) #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \ - 2 * sizeof(struct btrfs_disk_key) \ - 2 * sizeof(struct btrfs_chunk)) \ / sizeof(struct btrfs_stripe) + 1) struct btrfs_io_stripe { struct btrfs_device *dev; /* Block mapping. */ u64 physical; bool rst_search_commit_root; /* For the endio handler. */ struct btrfs_io_context *bioc; }; struct btrfs_discard_stripe { struct btrfs_device *dev; u64 physical; u64 length; }; /* * Context for IO submission for device stripe. * * - Track the unfinished mirrors for mirror based profiles * Mirror based profiles are SINGLE/DUP/RAID1/RAID10. * * - Contain the logical -> physical mapping info * Used by submit_stripe_bio() for mapping logical bio * into physical device address. 
* * - Contain device replace info * Used by handle_ops_on_dev_replace() to copy logical bios * into the new device. * * - Contain RAID56 full stripe logical bytenrs */ struct btrfs_io_context { refcount_t refs; struct btrfs_fs_info *fs_info; /* Taken from struct btrfs_chunk_map::type. */ u64 map_type; struct bio *orig_bio; atomic_t error; u16 max_errors; bool use_rst; u64 logical; u64 size; /* Raid stripe tree ordered entry. */ struct list_head rst_ordered_entry; /* * The total number of stripes, including the extra duplicated * stripe for replace. */ u16 num_stripes; /* * The mirror_num of this bioc. * * This is for reads which pass 0 as mirror_num, so we should return a * valid mirror_num (>0) to the reader. */ u16 mirror_num; /* * The following two members are for the dev-replace case only. * * @replace_nr_stripes: Number of duplicated stripes which need to be * written to the replace target. * Should be <= 2 (2 for DUP, otherwise <= 1). * @replace_stripe_src: Indicates where the duplicated stripes * are from. * * The @replace_stripe_src value is mostly for RAID56 cases. * Since non-RAID56 stripes share the same contents for the mapped range, * there is no need to track where the duplicated ones come from. * * But in the RAID56 case, all stripes contain different contents, so * we need a way to know the mapping. * * Here is an example for the two members, using a RAID5 write: * * num_stripes: 4 (3 + 1 duplicated write) * stripes[0]: dev = devid 1, physical = X * stripes[1]: dev = devid 2, physical = Y * stripes[2]: dev = devid 3, physical = Z * stripes[3]: dev = devid 0, physical = Y * * replace_nr_stripes = 1 * replace_stripe_src = 1 <- Means stripes[1] is involved in replace. * The duplicated stripe index would be * (@num_stripes - 1). * * Note that we can still have replace_nr_stripes = 2 for DUP. * In that case, all stripes share the same content, so we don't * need to bother with the @replace_stripe_src value at all. */ u16 replace_nr_stripes; s16 replace_stripe_src; /* * Logical bytenr of the full stripe start, only for RAID56 cases. * * When this value is set to anything other than (u64)-1, the stripes[] should * follow this pattern: * * (real_stripes = num_stripes - replace_nr_stripes) * (data_stripes = (is_raid6) ? (real_stripes - 2) : (real_stripes - 1)) * * stripes[0]: The first data stripe * stripes[1]: The second data stripe * ... * stripes[data_stripes - 1]: The last data stripe * stripes[data_stripes]: The P stripe * stripes[data_stripes + 1]: The Q stripe (only for RAID6). */ u64 full_stripe_logical; struct btrfs_io_stripe stripes[]; }; struct btrfs_device_info { struct btrfs_device *dev; u64 dev_offset; u64 max_avail; u64 total_avail; }; struct btrfs_raid_attr { u8 sub_stripes; /* sub_stripes info for map */ u8 dev_stripes; /* stripes per dev */ u8 devs_max; /* max devs to use */ u8 devs_min; /* min devs needed */ u8 tolerated_failures; /* max tolerated fail devs */ u8 devs_increment; /* ndevs has to be a multiple of this */ u8 ncopies; /* how many copies the data has */ u8 nparity; /* number of stripes worth of bytes to store * parity information */ u8 mindev_error; /* error code if the minimum device requirement is unmet */ const char raid_name[8]; /* name of the raid */ u64 bg_flag; /* block group flag of the raid */ }; extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES]; struct btrfs_chunk_map { struct rb_node rb_node; /* For mount time dev extent verification.
*/ int verified_stripes; refcount_t refs; u64 start; u64 chunk_len; u64 stripe_size; u64 type; int io_align; int io_width; int num_stripes; int sub_stripes; struct btrfs_io_stripe stripes[]; }; #define btrfs_chunk_map_size(n) (sizeof(struct btrfs_chunk_map) + \ (sizeof(struct btrfs_io_stripe) * (n))) static inline void btrfs_free_chunk_map(struct btrfs_chunk_map *map) { if (map && refcount_dec_and_test(&map->refs)) { ASSERT(RB_EMPTY_NODE(&map->rb_node)); kfree(map); } } struct btrfs_balance_control { struct btrfs_balance_args data; struct btrfs_balance_args meta; struct btrfs_balance_args sys; u64 flags; struct btrfs_balance_progress stat; }; /* * Search for a given device by the set parameters */ struct btrfs_dev_lookup_args { u64 devid; u8 *uuid; u8 *fsid; bool missing; }; /* We have to initialize to -1 because BTRFS_DEV_REPLACE_DEVID is 0 */ #define BTRFS_DEV_LOOKUP_ARGS_INIT { .devid = (u64)-1 } #define BTRFS_DEV_LOOKUP_ARGS(name) \ struct btrfs_dev_lookup_args name = BTRFS_DEV_LOOKUP_ARGS_INIT enum btrfs_map_op { BTRFS_MAP_READ, BTRFS_MAP_WRITE, BTRFS_MAP_GET_READ_MIRRORS, }; static inline enum btrfs_map_op btrfs_op(const struct bio *bio) { switch (bio_op(bio)) { case REQ_OP_WRITE: case REQ_OP_ZONE_APPEND: return BTRFS_MAP_WRITE; default: WARN_ON_ONCE(1); fallthrough; case REQ_OP_READ: return BTRFS_MAP_READ; } } static inline unsigned long btrfs_chunk_item_size(int num_stripes) { ASSERT(num_stripes); return sizeof(struct btrfs_chunk) + sizeof(struct btrfs_stripe) * (num_stripes - 1); } /* * Do the type safe conversion from stripe_nr to offset inside the chunk. * * @stripe_nr is u32, with left shift it can overflow u32 for chunks larger * than 4G. This does the proper type cast to avoid overflow. */ static inline u64 btrfs_stripe_nr_to_offset(u32 stripe_nr) { return (u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT; } void btrfs_get_bioc(struct btrfs_io_context *bioc); void btrfs_put_bioc(struct btrfs_io_context *bioc); int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, u64 logical, u64 *length, struct btrfs_io_context **bioc_ret, struct btrfs_io_stripe *smap, int *mirror_num_ret); int btrfs_map_repair_block(struct btrfs_fs_info *fs_info, struct btrfs_io_stripe *smap, u64 logical, u32 length, int mirror_num); struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, u64 logical, u64 *length_ret, u32 *num_stripes); int btrfs_read_sys_array(struct btrfs_fs_info *fs_info); int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info); struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, struct btrfs_space_info *space_info, u64 type); void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, blk_mode_t flags, void *holder); struct btrfs_device *btrfs_scan_one_device(const char *path, bool mount_arg_dev); int btrfs_forget_devices(dev_t devt); void btrfs_close_devices(struct btrfs_fs_devices *fs_devices); void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices); void btrfs_assign_next_active_device(struct btrfs_device *device, struct btrfs_device *this_dev); struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, const char *devpath); int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, struct btrfs_dev_lookup_args *args, const char *path); struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, const u64 *devid, const u8 *uuid, const char *path); void btrfs_put_dev_args_from_path(struct 
btrfs_dev_lookup_args *args); int btrfs_rm_device(struct btrfs_fs_info *fs_info, struct btrfs_dev_lookup_args *args, struct file **bdev_file); void __exit btrfs_cleanup_fs_uuids(void); int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len); int btrfs_grow_device(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 new_size); struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices, const struct btrfs_dev_lookup_args *args); int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path); int btrfs_balance(struct btrfs_fs_info *fs_info, struct btrfs_balance_control *bctl, struct btrfs_ioctl_balance_args *bargs); void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf); int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info); int btrfs_recover_balance(struct btrfs_fs_info *fs_info); int btrfs_pause_balance(struct btrfs_fs_info *fs_info); int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset, bool verbose); int btrfs_cancel_balance(struct btrfs_fs_info *fs_info); bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset); void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_get_dev_stats *stats); int btrfs_init_devices_late(struct btrfs_fs_info *fs_info); int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info); int btrfs_run_dev_stats(struct btrfs_trans_handle *trans); void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev); void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev); void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev); unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, u64 logical); u64 btrfs_calc_stripe_length(const struct btrfs_chunk_map *map); int btrfs_nr_parity_stripes(u64 type); int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans, struct btrfs_block_group *bg); int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp); int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map); #endif struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info, u64 logical, u64 length); struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info, u64 logical, u64 length); struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, u64 logical, u64 length); void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map); struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, int copy_num, bool drop_cache); void btrfs_release_disk_super(struct btrfs_super_block *super); static inline void btrfs_dev_stat_inc(struct btrfs_device *dev, int index) { atomic_inc(dev->dev_stat_values + index); /* * This memory barrier orders stores updating statistics before stores * updating dev_stats_ccnt. * * It pairs with smp_rmb() in btrfs_run_dev_stats(). 
*/ smp_mb__before_atomic(); atomic_inc(&dev->dev_stats_ccnt); } static inline int btrfs_dev_stat_read(struct btrfs_device *dev, int index) { return atomic_read(dev->dev_stat_values + index); } static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev, int index) { int ret; ret = atomic_xchg(dev->dev_stat_values + index, 0); /* * atomic_xchg() implies a full memory barrier as per atomic_t.txt: * - RMW operations that have a return value are fully ordered; * * This implicit memory barrier is paired with the smp_rmb() in * btrfs_run_dev_stats(). */ atomic_inc(&dev->dev_stats_ccnt); return ret; } static inline void btrfs_dev_stat_set(struct btrfs_device *dev, int index, unsigned long val) { atomic_set(dev->dev_stat_values + index, val); /* * This memory barrier orders stores updating statistics before stores * updating dev_stats_ccnt. * * It pairs with smp_rmb() in btrfs_run_dev_stats(). */ smp_mb__before_atomic(); atomic_inc(&dev->dev_stats_ccnt); } static inline const char *btrfs_dev_name(const struct btrfs_device *device) { if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) return "<missing disk>"; else return rcu_dereference(device->name); } static inline void btrfs_warn_unknown_chunk_allocation(enum btrfs_chunk_allocation_policy pol) { WARN_ONCE(1, "unknown allocation policy %d, fallback to regular", pol); } static inline void btrfs_fs_devices_inc_holding(struct btrfs_fs_devices *fs_devices) { lockdep_assert_held(&uuid_mutex); ASSERT(fs_devices->holding >= 0); fs_devices->holding++; } static inline void btrfs_fs_devices_dec_holding(struct btrfs_fs_devices *fs_devices) { lockdep_assert_held(&uuid_mutex); ASSERT(fs_devices->holding > 0); fs_devices->holding--; } void btrfs_commit_device_sizes(struct btrfs_transaction *trans); struct list_head * __attribute_const__ btrfs_get_fs_uuids(void); bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, struct btrfs_device *failing_dev); void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, struct btrfs_device *device); enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags); int btrfs_bg_type_to_factor(u64 flags); const char *btrfs_bg_type_to_raid_name(u64 flags); int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info); bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical); bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr); const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info, u64 logical, u16 total_stripes); #endif #endif |
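The btrfs_stripe_nr_to_offset() helper declared above exists because stripe numbers are stored as u32 while byte offsets need u64: shifting a u32 left by BTRFS_STRIPE_LEN_SHIFT wraps once the resulting offset exceeds 4G, unless the value is widened first. Below is a minimal userspace sketch of the same arithmetic, not part of the header; it assumes a 64K stripe length (shift of 16) purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed stripe length shift for this sketch (64K stripes). */
#define SKETCH_STRIPE_LEN_SHIFT 16

/* Shifting the u32 directly wraps modulo 2^32 for large stripe numbers. */
static uint64_t offset_without_cast(uint32_t stripe_nr)
{
	return stripe_nr << SKETCH_STRIPE_LEN_SHIFT;
}

/* Widening before the shift, as btrfs_stripe_nr_to_offset() does, keeps the full value. */
static uint64_t offset_with_cast(uint32_t stripe_nr)
{
	return (uint64_t)stripe_nr << SKETCH_STRIPE_LEN_SHIFT;
}

int main(void)
{
	uint32_t stripe_nr = 0x20000;	/* 128Ki stripes of 64K -> an 8G offset */

	printf("without cast: %llu\n", (unsigned long long)offset_without_cast(stripe_nr));
	printf("with cast:    %llu\n", (unsigned long long)offset_with_cast(stripe_nr));
	return 0;
}

The first variant prints 0 because the shifted value wraps before it is widened to the u64 return type; the second prints 8589934592 (8G), which is exactly the overflow the comment in the header warns about.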
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM jbd2 #if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_JBD2_H #include <linux/jbd2.h> #include <linux/tracepoint.h> struct transaction_chp_stats_s; struct transaction_run_stats_s; TRACE_EVENT(jbd2_checkpoint, TP_PROTO(journal_t *journal, int result), TP_ARGS(journal, result), TP_STRUCT__entry( __field( dev_t, dev ) __field( int, result ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->result = result; ), TP_printk("dev %d,%d result %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result) ); DECLARE_EVENT_CLASS(jbd2_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction), TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) __field( tid_t, transaction ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->sync_commit = commit_transaction->t_synchronous_commit; __entry->transaction = commit_transaction->t_tid; ), TP_printk("dev %d,%d transaction %u sync %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit) ); DEFINE_EVENT(jbd2_commit, jbd2_start_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); DEFINE_EVENT(jbd2_commit, jbd2_commit_locking, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) );
DEFINE_EVENT(jbd2_commit, jbd2_commit_logging, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); DEFINE_EVENT(jbd2_commit, jbd2_drop_transaction, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); TRACE_EVENT(jbd2_end_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction), TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) __field( tid_t, transaction ) __field( tid_t, head ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->sync_commit = commit_transaction->t_synchronous_commit; __entry->transaction = commit_transaction->t_tid; __entry->head = journal->j_tail_sequence; ), TP_printk("dev %d,%d transaction %u sync %d head %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit, __entry->head) ); TRACE_EVENT(jbd2_submit_inode_data, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; ), TP_printk("dev %d,%d ino %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino) ); DECLARE_EVENT_CLASS(jbd2_handle_start_class, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, requested_blocks) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->requested_blocks = requested_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u " "requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); TRACE_EVENT(jbd2_handle_extend, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int buffer_credits, int requested_blocks), TP_ARGS(dev, tid, type, line_no, buffer_credits, requested_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, buffer_credits ) __field( int, requested_blocks) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->buffer_credits = buffer_credits; __entry->requested_blocks = requested_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u " "buffer_credits %d requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->buffer_credits, __entry->requested_blocks) ); TRACE_EVENT(jbd2_handle_stats, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int interval, int sync, int requested_blocks, int dirtied_blocks), TP_ARGS(dev, tid, type, line_no, interval, sync, requested_blocks, dirtied_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( 
unsigned int, type ) __field( unsigned int, line_no ) __field( int, interval ) __field( int, sync ) __field( int, requested_blocks) __field( int, dirtied_blocks ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->interval = interval; __entry->sync = sync; __entry->requested_blocks = requested_blocks; __entry->dirtied_blocks = dirtied_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u interval %d " "sync %d requested_blocks %d dirtied_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->interval, __entry->sync, __entry->requested_blocks, __entry->dirtied_blocks) ); TRACE_EVENT(jbd2_run_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_run_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned long, wait ) __field( unsigned long, request_delay ) __field( unsigned long, running ) __field( unsigned long, locked ) __field( unsigned long, flushing ) __field( unsigned long, logging ) __field( __u32, handle_count ) __field( __u32, blocks ) __field( __u32, blocks_logged ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->wait = stats->rs_wait; __entry->request_delay = stats->rs_request_delay; __entry->running = stats->rs_running; __entry->locked = stats->rs_locked; __entry->flushing = stats->rs_flushing; __entry->logging = stats->rs_logging; __entry->handle_count = stats->rs_handle_count; __entry->blocks = stats->rs_blocks; __entry->blocks_logged = stats->rs_blocks_logged; ), TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u " "locked %u flushing %u logging %u handle_count %u " "blocks %u blocks_logged %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, jiffies_to_msecs(__entry->wait), jiffies_to_msecs(__entry->request_delay), jiffies_to_msecs(__entry->running), jiffies_to_msecs(__entry->locked), jiffies_to_msecs(__entry->flushing), jiffies_to_msecs(__entry->logging), __entry->handle_count, __entry->blocks, __entry->blocks_logged) ); TRACE_EVENT(jbd2_checkpoint_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_chp_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned long, chp_time ) __field( __u32, forced_to_close ) __field( __u32, written ) __field( __u32, dropped ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->chp_time = stats->cs_chp_time; __entry->forced_to_close= stats->cs_forced_to_close; __entry->written = stats->cs_written; __entry->dropped = stats->cs_dropped; ), TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u " "written %u dropped %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, jiffies_to_msecs(__entry->chp_time), __entry->forced_to_close, __entry->written, __entry->dropped) ); TRACE_EVENT(jbd2_update_log_tail, TP_PROTO(journal_t *journal, tid_t first_tid, unsigned long block_nr, unsigned long freed), TP_ARGS(journal, first_tid, block_nr, freed), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tail_sequence ) __field( tid_t, first_tid ) __field(unsigned long, block_nr ) __field(unsigned long, freed ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->tail_sequence = journal->j_tail_sequence; __entry->first_tid = first_tid; __entry->block_nr = block_nr; __entry->freed = freed; ), TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", MAJOR(__entry->dev), MINOR(__entry->dev), 
__entry->tail_sequence, __entry->first_tid, __entry->block_nr, __entry->freed) ); TRACE_EVENT(jbd2_write_superblock, TP_PROTO(journal_t *journal, blk_opf_t write_flags), TP_ARGS(journal, write_flags), TP_STRUCT__entry( __field( dev_t, dev ) __field( blk_opf_t, write_flags ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->write_flags = write_flags; ), TP_printk("dev %d,%d write_flags %x", MAJOR(__entry->dev), MINOR(__entry->dev), (__force u32)__entry->write_flags) ); TRACE_EVENT(jbd2_lock_buffer_stall, TP_PROTO(dev_t dev, unsigned long stall_ms), TP_ARGS(dev, stall_ms), TP_STRUCT__entry( __field( dev_t, dev ) __field(unsigned long, stall_ms ) ), TP_fast_assign( __entry->dev = dev; __entry->stall_ms = stall_ms; ), TP_printk("dev %d,%d stall_ms %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->stall_ms) ); DECLARE_EVENT_CLASS(jbd2_journal_shrink, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, nr_to_scan) __field(unsigned long, count) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->nr_to_scan = nr_to_scan; __entry->count = count; ), TP_printk("dev %d,%d nr_to_scan %lu count %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_to_scan, __entry->count) ); DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_count, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count) ); DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_scan_enter, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count) ); TRACE_EVENT(jbd2_shrink_scan_exit, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long nr_shrunk, unsigned long count), TP_ARGS(journal, nr_to_scan, nr_shrunk, count), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, nr_to_scan) __field(unsigned long, nr_shrunk) __field(unsigned long, count) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->nr_to_scan = nr_to_scan; __entry->nr_shrunk = nr_shrunk; __entry->count = count; ), TP_printk("dev %d,%d nr_to_scan %lu nr_shrunk %lu count %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_to_scan, __entry->nr_shrunk, __entry->count) ); TRACE_EVENT(jbd2_shrink_checkpoint_list, TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid, unsigned long nr_freed, tid_t next_tid), TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid), TP_STRUCT__entry( __field(dev_t, dev) __field(tid_t, first_tid) __field(tid_t, tid) __field(tid_t, last_tid) __field(unsigned long, nr_freed) __field(tid_t, next_tid) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->first_tid = first_tid; __entry->tid = tid; __entry->last_tid = last_tid; __entry->nr_freed = nr_freed; __entry->next_tid = next_tid; ), TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu " "next transaction %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->first_tid, __entry->tid, __entry->last_tid, __entry->nr_freed, __entry->next_tid) ); #endif /* _TRACE_JBD2_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
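The DECLARE_EVENT_CLASS()/DEFINE_EVENT() pairs in this header let several commit-phase events share one entry layout and format string, and every TRACE_EVENT() or DEFINE_EVENT() also generates a trace_<name>() inline for call sites. The sketch below shows the usual consumption pattern; the wrapper function name is hypothetical, and the include path assumes the header is installed as <trace/events/jbd2.h>, as jbd2 trace headers are in the kernel tree.

/* In exactly one .c file, expand the tracepoints before including the header. */
#define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h>

/*
 * Hypothetical call site: emits "dev %d,%d result %d" when the
 * jbd2:jbd2_checkpoint event is enabled, and reduces to a cheap
 * static-branch check when it is not.
 */
static void example_checkpoint_done(journal_t *journal, int result)
{
	trace_jbd2_checkpoint(journal, result);
}

At runtime the event is toggled through tracefs (for example events/jbd2/jbd2_checkpoint/enable), which is why the call above can be left in place unconditionally.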
// SPDX-License-Identifier: GPL-2.0-or-later /* * DivIO nw80x subdriver * * Copyright (C) 2011 Jean-François Moine (http://moinejf.free.fr) * Copyright (C) 2003 Sylvain Munaut <tnt@246tNt.com> * Kjell Claesson <keyson@users.sourceforge.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "nw80x" #include "gspca.h" MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>"); MODULE_DESCRIPTION("NW80x USB Camera Driver"); MODULE_LICENSE("GPL"); static int webcam; /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ u32 ae_res; s8 ag_cnt; #define AG_CNT_START 13 u8 exp_too_low_cnt; u8 exp_too_high_cnt; u8 bridge; u8 webcam; }; enum bridges { BRIDGE_NW800, /* and et31x110 */ BRIDGE_NW801, BRIDGE_NW802, }; enum webcams { Generic800, SpaceCam, /* Trust 120 SpaceCam */ SpaceCam2, /* other Trust 120 SpaceCam */ Cvideopro, /* Conceptronic Video Pro */ Dlink350c, DS3303u, Kr651us, Kritter, Mustek300, Proscope, Twinkle, DvcV6, P35u, Generic802, NWEBCAMS /* number of webcams */ }; static const u8 webcam_chip[NWEBCAMS] = { [Generic800] = BRIDGE_NW800, /* 06a5:0000 * Typhoon Webcam 100 USB */ [SpaceCam] = BRIDGE_NW800, /* 06a5:d800 * Trust SpaceCam120 or SpaceCam100 PORTABLE */ [SpaceCam2] = BRIDGE_NW800, /* 06a5:d800 - pas106 * other Trust SpaceCam120 or SpaceCam100 PORTABLE */ [Cvideopro] = BRIDGE_NW802, /* 06a5:d001 * Conceptronic Video Pro 'CVIDEOPRO USB Webcam CCD' */ [Dlink350c] = BRIDGE_NW802, /* 06a5:d001 * D-Link NetQam Pro 250plus */ [DS3303u] = BRIDGE_NW801, /* 06a5:d001 * Plustek Opticam 500U or ProLink DS3303u */ [Kr651us] = BRIDGE_NW802, /* 06a5:d001 * Panasonic GP-KR651US */ [Kritter] = BRIDGE_NW802, /* 06a5:d001 * iRez Kritter cam */ [Mustek300] = BRIDGE_NW802, /* 055f:d001 * Mustek Wcam 300 mini */ [Proscope] = BRIDGE_NW802, /* 06a5:d001 * Scalar USB Microscope (ProScope) */ [Twinkle] = BRIDGE_NW800, /* 06a5:d800 - hv7121b? (seems pas106) * Divio Chicony TwinkleCam * DSB-C110 */ [DvcV6] = BRIDGE_NW802, /* 0502:d001 * DVC V6 */ [P35u] = BRIDGE_NW801, /* 052b:d001, 06a5:d001 and 06be:d001 * EZCam Pro p35u */ [Generic802] = BRIDGE_NW802, }; /* * other webcams: * - nw801 046d:d001 * Logitech QuickCam Pro (dark focus ring) * - nw801 0728:d001 * AVerMedia Camguard * - nw??? 06a5:d001 * D-Link NetQam Pro 250plus * - nw800 065a:d800 * Showcam NGS webcam * - nw??? ????:???? * Sceptre svc300 */ /* * registers * nw800/et31x110 nw801 nw802 * 0000..009e 0000..00a1 0000..009e * 0200..0211 id id * 0300..0302 id id * 0400..0406 (inex) 0400..0406 * 0500..0505 0500..0506 (inex) * 0600..061a 0600..0601 0600..0601 * 0800..0814 id id * 1000..109c 1000..10a1 1000..109a */ /* resolutions * nw800: 320x240, 352x288 * nw801/802: 320x240, 640x480 */ static const struct v4l2_pix_format cif_mode[] = { {320, 240, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, {352, 288, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG} }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, {640, 480, V4L2_PIX_FMT_JPGL, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8, .colorspace = V4L2_COLORSPACE_JPEG}, }; /* * The sequences below contain: * - 1st and 2nd bytes: either * - register number (BE) * - I2C0 + i2c address * - 3rd byte: data length (=0 for end of sequence) * - n bytes: data */ #define I2C0 0xff static const u8 nw800_init[] = { 0x04, 0x05, 0x01, 0x61, 0x04, 0x04, 0x01, 0x01, 0x04, 0x06, 0x01, 0x04, 0x04, 0x04, 0x03, 0x00, 0x00, 0x00, 0x05, 0x05, 0x01, 0x00, 0, 0, 0 }; static const u8 nw800_start[] = { 0x04, 0x06, 0x01, 0xc0, 0x00, 0x00, 0x40, 0x10, 0x43, 0x00, 0xb4, 0x01, 0x10, 0x00, 0x4f, 0xef, 0x0e, 0x00, 0x74, 0x01, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x01, 0x00, 0x19, 0x00, 0x3e, 0x00, 0x24, 0x03, 0x3e, 0x00 |